[Hermon] Add driver for Mellanox Hermon (ConnectX) cards.
authorMichael Brown <mcb30@etherboot.org>
Sat, 23 Feb 2008 10:51:38 +0000 (10:51 +0000)
committerMichael Brown <mcb30@etherboot.org>
Sat, 23 Feb 2008 10:51:38 +0000 (10:51 +0000)
src/drivers/infiniband/MT25408_PRM.h [new file with mode: 0644]
src/drivers/infiniband/hermon.c [new file with mode: 0644]
src/drivers/infiniband/hermon.h [new file with mode: 0644]
src/include/gpxe/errfile.h

diff --git a/src/drivers/infiniband/MT25408_PRM.h b/src/drivers/infiniband/MT25408_PRM.h
new file mode 100644 (file)
index 0000000..17882ed
--- /dev/null
@@ -0,0 +1,3313 @@
+/*
+  This software is available to you under a choice of one of two
+  licenses.  You may choose to be licensed under the terms of the GNU
+  General Public License (GPL) Version 2, available at
+  <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
+  license, available in the LICENSE.TXT file accompanying this
+  software.  These details are also available at
+  <http://openib.org/license.html>.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+  BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+  ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  SOFTWARE.
+
+  Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
+*/
+
+/***
+ *** This file was generated at "Mon Apr 16 23:22:02 2007"
+ *** by:
+ ***    % csp_bf -copyright=/mswg/misc/license-header.txt -prefix hermonprm_ -bits -fixnames MT25408_PRM.csp
+ ***/
+
+#ifndef H_prefix_hermonprm_bits_fixnames_MT25408_PRM_csp_H
+#define H_prefix_hermonprm_bits_fixnames_MT25408_PRM_csp_H
+
+/* UD Address Vector */
+
+struct hermonprm_ud_address_vector_st {        /* Little Endian */
+    pseudo_bit_t       pd[0x00018];           /* Protection Domain */
+    pseudo_bit_t       port_number[0x00002];  /* Port number
+                                                 1 - Port 1
+                                                 2 - Port 2
+                                                 other - reserved */
+    pseudo_bit_t       reserved0[0x00005];
+    pseudo_bit_t       fl[0x00001];           /* Force loopback */
+/* -------------- */
+    pseudo_bit_t       rlid[0x00010];         /* Remote (Destination) LID */
+    pseudo_bit_t       my_lid_path_bits[0x00007];/* Source LID - the lower 7 bits (upper bits are taken from PortInfo) */
+    pseudo_bit_t       g[0x00001];            /* Global address enable - if set, GRH will be formed for packet header */
+    pseudo_bit_t       reserved1[0x00008];
+/* -------------- */
+    pseudo_bit_t       hop_limit[0x00008];    /* IPv6 hop limit */
+    pseudo_bit_t       max_stat_rate[0x00004];/* Maximum static rate control.
+                                                 0 - 4X injection rate
+                                                 1 - 1X injection rate
+                                                 other - reserved
+                                                  */
+    pseudo_bit_t       reserved2[0x00004];
+    pseudo_bit_t       mgid_index[0x00007];   /* Index to port GID table
+                                                 mgid_index = (port_number-1) * 2^log_max_gid + gid_index
+                                                 Where:
+                                                 1. log_max_gid is taken from QUERY_DEV_CAP command
+                                                 2. gid_index is the index to the GID table */
+    pseudo_bit_t       reserved3[0x00009];
+/* -------------- */
+    pseudo_bit_t       flow_label[0x00014];   /* IPv6 flow label */
+    pseudo_bit_t       tclass[0x00008];       /* IPv6 TClass */
+    pseudo_bit_t       sl[0x00004];           /* InfiniBand Service Level (SL) */
+/* -------------- */
+    pseudo_bit_t       rgid_127_96[0x00020];  /* Remote GID[127:96] */
+/* -------------- */
+    pseudo_bit_t       rgid_95_64[0x00020];   /* Remote GID[95:64] */
+/* -------------- */
+    pseudo_bit_t       rgid_63_32[0x00020];   /* Remote GID[63:32] */
+/* -------------- */
+    pseudo_bit_t       rgid_31_0[0x00020];    /* Remote GID[31:0] if G bit is set. Must be set to 0x2 if G bit is cleared. */
+/* -------------- */
+}; 
+
+/* Send doorbell */
+
+struct hermonprm_send_doorbell_st {    /* Little Endian */
+    pseudo_bit_t       nopcode[0x00005];      /* Opcode of descriptor to be executed */
+    pseudo_bit_t       f[0x00001];            /* Fence bit. If set, descriptor is fenced */
+    pseudo_bit_t       reserved0[0x00002];
+    pseudo_bit_t       wqe_counter[0x00010];  /* Modulo-64K counter of WQEs posted to the QP since its creation excluding the newly posted WQEs in this doorbell. Should be zero for the first doorbell on the QP */
+    pseudo_bit_t       wqe_cnt[0x00008];      /* Number of WQEs posted with this doorbell. Must be greater than zero. */
+/* -------------- */
+    pseudo_bit_t       nds[0x00006];          /* Next descriptor size (in 16-byte chunks) */
+    pseudo_bit_t       reserved1[0x00002];
+    pseudo_bit_t       qpn[0x00018];          /* QP number this doorbell is rung on */
+/* -------------- */
+}; 
+
+/* Send wqe segment data inline */
+
+struct hermonprm_wqe_segment_data_inline_st {  /* Little Endian */
+    pseudo_bit_t       byte_count[0x0000a];   /* Not including padding for 16Byte chunks */
+    pseudo_bit_t       reserved0[0x00015];
+    pseudo_bit_t       always1[0x00001];      /* Always set to one */
+/* -------------- */
+    pseudo_bit_t       data[0x00018];         /* Data may be larger than this segment size - in 16Byte chunks */
+    pseudo_bit_t       reserved1[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00040];
+/* -------------- */
+}; 
+
+/* Send wqe segment data ptr */
+
+struct hermonprm_wqe_segment_data_ptr_st {     /* Little Endian */
+    pseudo_bit_t       byte_count[0x0001f];   /* Byte count of the data referenced by this entry */
+    pseudo_bit_t       always0[0x00001];      /* Always cleared to zero */
+/* -------------- */
+    pseudo_bit_t       l_key[0x00020];        /* L_Key (memory key) of the referenced region */
+/* -------------- */
+    pseudo_bit_t       local_address_h[0x00020];/* Local address [63:32] */
+/* -------------- */
+    pseudo_bit_t       local_address_l[0x00020];/* Local address [31:0] */
+/* -------------- */
+}; 
+
+/* Local invalidate segment */
+
+struct hermonprm_local_invalidate_segment_st { /* Little Endian */
+    pseudo_bit_t       reserved0[0x00040];
+/* -------------- */
+    pseudo_bit_t       mem_key[0x00018];      /* Memory key to be invalidated */
+    pseudo_bit_t       reserved1[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x000a0];
+/* -------------- */
+}; 
+
+/* Fast_Registration_Segment   ####michal - doesn't match PRM (fields were added, see below) new table size in bytes -  0x30 */
+
+struct hermonprm_fast_registration_segment_st {        /* Little Endian */
+    pseudo_bit_t       reserved0[0x0001b];
+    pseudo_bit_t       lr[0x00001];           /* If set - Local Read access will be enabled */
+    pseudo_bit_t       lw[0x00001];           /* If set - Local Write access will be enabled */
+    pseudo_bit_t       rr[0x00001];           /* If set - Remote Read access will be enabled */
+    pseudo_bit_t       rw[0x00001];           /* If set - Remote Write access will be enabled */
+    pseudo_bit_t       a[0x00001];            /* If set - Remote Atomic access will be enabled */
+/* -------------- */
+    pseudo_bit_t       pbl_ptr_63_32[0x00020];/* Physical address pointer [63:32] to the physical buffer list  ### michal - this field is replaced with mem_key .32 */
+/* -------------- */
+    pseudo_bit_t       mem_key[0x00020];      /* Memory Key on which the fast registration is executed on. ###michal-this field is replaced with pbl_ptr_63_32 */
+/* -------------- */
+    pseudo_bit_t       page_size[0x00005];    /* Page size used for the region. Actual size is [4K]*2^Page_size bytes.
+                                                 page_size should be less than 20. ###michal - field doesn't exist (see replacement above) */
+    pseudo_bit_t       reserved1[0x00002];
+    pseudo_bit_t       zb[0x00001];           /* Zero Based Region               ###michal - field doesn't exist (see replacement above) */
+    pseudo_bit_t       pbl_ptr_31_8[0x00018]; /* Physical address pointer [31:8] to the physical buffer list    ###michal - field doesn't exist (see replacement above) */
+/* -------------- */
+    pseudo_bit_t       start_address_h[0x00020];/* Start Address[63:32] - Virtual Address where this region starts */
+/* -------------- */
+    pseudo_bit_t       start_address_l[0x00020];/* Start Address[31:0] - Virtual Address where this region starts */
+/* -------------- */
+    pseudo_bit_t       reg_len_h[0x00020];    /* Region Length[63:32] */
+/* -------------- */
+    pseudo_bit_t       reg_len_l[0x00020];    /* Region Length[31:0] */
+/* -------------- */
+}; 
+
+/* Send wqe segment atomic */
+
+struct hermonprm_wqe_segment_atomic_st {       /* Little Endian */
+    pseudo_bit_t       swap_add_h[0x00020];   /* Swap/Add operand [63:32] */
+/* -------------- */
+    pseudo_bit_t       swap_add_l[0x00020];   /* Swap/Add operand [31:0] */
+/* -------------- */
+    pseudo_bit_t       compare_h[0x00020];    /* Compare operand [63:32] */
+/* -------------- */
+    pseudo_bit_t       compare_l[0x00020];    /* Compare operand [31:0] */
+/* -------------- */
+}; 
+
+/* Send wqe segment remote address */
+
+struct hermonprm_wqe_segment_remote_address_st {       /* Little Endian */
+    pseudo_bit_t       remote_virt_addr_h[0x00020];/* Remote virtual address [63:32] */
+/* -------------- */
+    pseudo_bit_t       remote_virt_addr_l[0x00020];/* Remote virtual address [31:0] */
+/* -------------- */
+    pseudo_bit_t       rkey[0x00020];         /* R_Key (remote memory key) */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00020];
+/* -------------- */
+}; 
+
+/* Send wqe segment bind */
+
+struct hermonprm_wqe_segment_bind_st { /* Little Endian; memory window bind segment */
+    pseudo_bit_t       reserved0[0x0001d];
+    pseudo_bit_t       rr[0x00001];           /* If set, Remote Read Enable for bound window. */
+    pseudo_bit_t       rw[0x00001];           /* If set, Remote Write Enable for bound window.
+                                                  */
+    pseudo_bit_t       a[0x00001];            /* If set, Atomic Enable for bound window. */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x0001e];
+    pseudo_bit_t       zb[0x00001];           /* If set, Window is Zero Based. */
+    pseudo_bit_t       type[0x00001];         /* Window type.
+                                                 0 - Type one window
+                                                 1 - Type two window
+                                                  */
+/* -------------- */
+    pseudo_bit_t       new_rkey[0x00020];     /* The new RKey of window to bind */
+/* -------------- */
+    pseudo_bit_t       region_lkey[0x00020];  /* Local key of region, which window will be bound to */
+/* -------------- */
+    pseudo_bit_t       start_address_h[0x00020];/* Start address [63:32] of the bound window */
+/* -------------- */
+    pseudo_bit_t       start_address_l[0x00020];/* Start address [31:0] of the bound window */
+/* -------------- */
+    pseudo_bit_t       length_h[0x00020];     /* Window length [63:32] */
+/* -------------- */
+    pseudo_bit_t       length_l[0x00020];     /* Window length [31:0] */
+/* -------------- */
+}; 
+
+/* Send wqe segment ud */
+
+struct hermonprm_wqe_segment_ud_st {   /* Little Endian */
+    struct hermonprm_ud_address_vector_st      ud_address_vector;/* UD Address Vector */
+/* -------------- */
+    pseudo_bit_t       destination_qp[0x00018];/* Destination QP number */
+    pseudo_bit_t       reserved0[0x00008];
+/* -------------- */
+    pseudo_bit_t       q_key[0x00020];        /* Q_Key */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00040];
+/* -------------- */
+}; 
+
+/* Send wqe segment rd */
+
+struct hermonprm_wqe_segment_rd_st {   /* Little Endian */
+    pseudo_bit_t       destination_qp[0x00018];/* Destination QP number */
+    pseudo_bit_t       reserved0[0x00008];
+/* -------------- */
+    pseudo_bit_t       q_key[0x00020];        /* Q_Key */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00040];
+/* -------------- */
+}; 
+
+/* Send wqe segment ctrl */
+
+struct hermonprm_wqe_segment_ctrl_send_st {    /* Little Endian */
+    pseudo_bit_t       opcode[0x00005];
+    pseudo_bit_t       reserved0[0x0001a];
+    pseudo_bit_t       owner[0x00001];
+/* -------------- */
+    pseudo_bit_t       ds[0x00006];           /* descriptor (wqe) size in 16-byte chunks */
+    pseudo_bit_t       f[0x00001];            /* fence */
+    pseudo_bit_t       reserved1[0x00019];
+/* -------------- */
+    pseudo_bit_t       fl[0x00001];           /* Force LoopBack */
+    pseudo_bit_t       s[0x00001];            /* Remote Solicited Event */
+    pseudo_bit_t       c[0x00002];            /* completion required: 0b00 - no   0b11 - yes */
+    pseudo_bit_t       ip[0x00001];           /* When set, the HCA will calculate the IP checksum of the IP header that is present immediately after the IPoverIB encapsulation header. In the case of multiple headers (encapsulation), the HCA will calculate the checksum only for the first IP header following the IPoverIB encapsulation header. Not Valid for IPv6 packets */
+    pseudo_bit_t       tcp_udp[0x00001];      /* When set, the HCA will calculate the TCP/UDP checksum of the packet that is present immediately after the IP header. In the case of multiple headers (encapsulation), the HCA will calculate the checksum only for the first TCP header following the IP header. This bit may be set only if the entire TCP/UDP segment is present in one IB packet */
+    pseudo_bit_t       reserved2[0x00001];
+    pseudo_bit_t       so[0x00001];           /* Strong Ordering - when set, the WQE will be executed only after all previous WQEs have been executed. Can be set for RC WQEs only. This bit must be set in type two BIND, Fast Registration and Local invalidate operations. */
+    pseudo_bit_t       src_remote_buf[0x00018];
+/* -------------- */
+    pseudo_bit_t       immediate[0x00020];    /* If the OpCode encodes an operation with Immediate (RDMA-write/SEND), This field will hold the Immediate data to be sent. If the OpCode encodes send and invalidate operations, this field holds the Invalidation key to be inserted into the packet; otherwise, this field is reserved. */
+/* -------------- */
+}; 
+
+/* Address Path        # ###michal - match to PRM */
+
+struct hermonprm_address_path_st {     /* Little Endian */
+    pseudo_bit_t       pkey_index[0x00007];   /* PKey table index */
+    pseudo_bit_t       reserved0[0x00016];
+    pseudo_bit_t       sv[0x00001];           /* Service VLAN on QP */
+    pseudo_bit_t       cv[0x00001];           /* Customer VLAN in QP */
+    pseudo_bit_t       fl[0x00001];           /* Force LoopBack */
+/* -------------- */
+    pseudo_bit_t       rlid[0x00010];         /* Remote (Destination) LID */
+    pseudo_bit_t       my_lid_smac_idx[0x00007];/* Source LID - the lower 7 bits (upper bits are taken from PortInfo) */
+    pseudo_bit_t       grh_ip[0x00001];       /* Global address enable - if set, GRH will be formed for packet header */
+    pseudo_bit_t       reserved1[0x00008];
+/* -------------- */
+    pseudo_bit_t       hop_limit[0x00008];    /* IPv6 hop limit */
+    pseudo_bit_t       max_stat_rate[0x00004];/* Maximum static rate control.
+                                                 0 - 100% injection rate
+                                                 1 - 25% injection rate
+                                                 2 - 12.5% injection rate
+                                                 3 - 50% injection rate
+                                                 7: 2.5 Gb/s.
+                                                 8: 10 Gb/s.
+                                                 9: 30 Gb/s.
+                                                 10: 5 Gb/s.
+                                                 11: 20 Gb/s.
+                                                 12: 40 Gb/s.
+                                                 13: 60 Gb/s.
+                                                 14: 80 Gb/s.
+                                                 15: 120 Gb/s. */
+    pseudo_bit_t       reserved2[0x00004];
+    pseudo_bit_t       mgid_index[0x00007];   /* Index to port GID table */
+    pseudo_bit_t       reserved3[0x00004];
+    pseudo_bit_t       ack_timeout[0x00005];  /* Local ACK timeout - Transport timer for activation of retransmission mechanism. Refer to IB spec Vol1 9.7.6.1.3 for further details.
+                                                 The transport timer is set to 4.096us*2^ack_timeout, if ack_timeout is 0 then transport timer is disabled. */
+/* -------------- */
+    pseudo_bit_t       flow_label[0x00014];   /* IPv6 flow label */
+    pseudo_bit_t       tclass[0x00008];       /* IPv6 TClass */
+    pseudo_bit_t       reserved4[0x00004];
+/* -------------- */
+    pseudo_bit_t       rgid_127_96[0x00020];  /* Remote GID[127:96] */
+/* -------------- */
+    pseudo_bit_t       rgid_95_64[0x00020];   /* Remote GID[95:64] */
+/* -------------- */
+    pseudo_bit_t       rgid_63_32[0x00020];   /* Remote GID[63:32] */
+/* -------------- */
+    pseudo_bit_t       rgid_31_0[0x00020];    /* Remote GID[31:0] */
+/* -------------- */
+    pseudo_bit_t       reserved5[0x00008];
+    pseudo_bit_t       sp[0x00001];           /* if set, spoofing protection is enforced on this QP and Ethertype headers are restricted */
+    pseudo_bit_t       reserved6[0x00002];
+    pseudo_bit_t       fvl[0x00001];          /* force VLAN */
+    pseudo_bit_t       fsip[0x00001];         /* force source IP */
+    pseudo_bit_t       fsm[0x00001];          /* force source MAC */
+    pseudo_bit_t       reserved7[0x0000a];
+    pseudo_bit_t       sched_queue[0x00008];
+/* -------------- */
+    pseudo_bit_t       dmac_47_32[0x00010];   /* Destination MAC [47:32] */
+    pseudo_bit_t       vlan_index[0x00007];
+    pseudo_bit_t       reserved8[0x00001];
+    pseudo_bit_t       counter_index[0x00008];/* Index to a table of counters that counts egress packets and bytes, 0xFF not valid */
+/* -------------- */
+    pseudo_bit_t       dmac_31_0[0x00020];    /* Destination MAC [31:0] */
+/* -------------- */
+}; 
+
+/* HCA Command Register (HCR)    #### michal - match PRM */
+
+struct hermonprm_hca_command_register_st {     /* Little Endian */
+    pseudo_bit_t       in_param_h[0x00020];   /* Input Parameter: parameter[63:32] or pointer[63:32] to input mailbox (see command description) */
+/* -------------- */
+    pseudo_bit_t       in_param_l[0x00020];   /* Input Parameter: parameter[31:0] or pointer[31:0] to input mailbox (see command description) */
+/* -------------- */
+    pseudo_bit_t       input_modifier[0x00020];/* Input Parameter Modifier */
+/* -------------- */
+    pseudo_bit_t       out_param_h[0x00020];  /* Output Parameter: parameter[63:32] or pointer[63:32] to output mailbox (see command description) */
+/* -------------- */
+    pseudo_bit_t       out_param_l[0x00020];  /* Output Parameter: parameter[31:0] or pointer[31:0] to output mailbox (see command description) */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00010];
+    pseudo_bit_t       token[0x00010];        /* Software assigned token to the command, to uniquely identify it. The token is returned to the software in the EQE reported. */
+/* -------------- */
+    pseudo_bit_t       opcode[0x0000c];       /* Command opcode */
+    pseudo_bit_t       opcode_modifier[0x00004];/* Opcode Modifier, see specific description for each command. */
+    pseudo_bit_t       reserved1[0x00005];
+    pseudo_bit_t       t[0x00001];            /* Toggle */
+    pseudo_bit_t       e[0x00001];            /* Event Request
+                                                 0 - Don't report event (software will poll the GO bit)
+                                                 1 - Report event to EQ when the command completes */
+    pseudo_bit_t       go[0x00001];           /* Go (0=Software ownership for the HCR, 1=Hardware ownership for the HCR)
+                                                 Software can write to the HCR only if Go bit is cleared.
+                                                 Software must set the Go bit to trigger the HW to execute the command. Software must not write to this register value other than 1 for the Go bit. */
+    pseudo_bit_t       status[0x00008];       /* Command execution status report. Valid only if command interface is under SW ownership (Go bit is cleared)
+                                                 0 - command completed without error. If different than zero, command execution completed with error. Syndrome encoding depends on the command executed and is defined for each command */
+/* -------------- */
+}; 
+
+/* CQ Doorbell */
+
+struct hermonprm_cq_cmd_doorbell_st {  /* Little Endian */
+    pseudo_bit_t       cqn[0x00018];          /* CQ number accessed */
+    pseudo_bit_t       cmd[0x00003];          /* Command to be executed on CQ
+                                                 0x0 - Reserved
+                                                 0x1 - Request notification for next Solicited completion event. CQ_param specifies the current CQ Consumer Counter.
+                                                 0x2 - Request notification for next Solicited or Unsolicited completion event. CQ_param specifies the current CQ Consumer Counter.
+                                                 0x3 - Request notification for multiple completions (Arm-N). CQ_param specifies the value of the CQ Counter that when reached by HW (i.e. HW generates a CQE into this Counter) Event will be generated
+                                                 Other - Reserved */
+    pseudo_bit_t       reserved0[0x00001];
+    pseudo_bit_t       cmd_sn[0x00002];       /* Command Sequence Number - This field should be incremented upon receiving completion notification of the respective CQ.
+                                                 This transition is done by ringing Request notification for next Solicited, Request notification for next Solicited or Unsolicited
+                                                 completion or Request notification for multiple completions doorbells after receiving completion notification.
+                                                 This field is initialized to Zero */
+    pseudo_bit_t       reserved1[0x00002];
+/* -------------- */
+    pseudo_bit_t       cq_param[0x00020];     /* Parameter to be used by the CQ command (consumer counter or Arm-N value; see cmd above) */
+/* -------------- */
+}; 
+
+/* RD-send doorbell */
+
+struct hermonprm_rd_send_doorbell_st { /* Little Endian */
+    pseudo_bit_t       reserved0[0x00008];
+    pseudo_bit_t       een[0x00018];          /* End-to-end context number (reliable datagram)
+                                                 Must be zero for Nop and Bind operations */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00008];
+    pseudo_bit_t       qpn[0x00018];          /* QP number this doorbell is rung on */
+/* -------------- */
+    struct hermonprm_send_doorbell_st  send_doorbell;/* Send doorbell parameters */
+/* -------------- */
+}; 
+
+/* Multicast Group Member QP   #### michal - match PRM */
+
+struct hermonprm_mgmqp_st {    /* Little Endian */
+    pseudo_bit_t       qpn_i[0x00018];        /* QPN_i: QP number which is a member in this multicast group. Valid only if Qi bit is set. Length of the QPN_i list is set in INIT_HCA */
+    pseudo_bit_t       reserved0[0x00006];
+    pseudo_bit_t       blck_lb[0x00001];      /* Block self-loopback messages arriving at this QP */
+    pseudo_bit_t       qi[0x00001];           /* Qi: QPN_i is valid */
+/* -------------- */
+}; 
+
+/* vsd */
+
+struct hermonprm_vsd_st {      /* Little Endian */
+    pseudo_bit_t       vsd_dw0[0x00020];      /* Vendor Specific Data words (dw0-dw55); contents are opaque to the driver */
+/* -------------- */
+    pseudo_bit_t       vsd_dw1[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw2[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw3[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw4[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw5[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw6[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw7[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw8[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw9[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw10[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw11[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw12[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw13[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw14[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw15[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw16[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw17[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw18[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw19[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw20[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw21[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw22[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw23[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw24[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw25[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw26[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw27[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw28[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw29[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw30[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw31[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw32[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw33[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw34[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw35[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw36[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw37[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw38[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw39[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw40[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw41[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw42[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw43[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw44[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw45[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw46[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw47[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw48[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw49[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw50[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw51[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw52[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw53[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw54[0x00020];
+/* -------------- */
+    pseudo_bit_t       vsd_dw55[0x00020];
+/* -------------- */
+}; 
+
+/* UAR Parameters */
+
+struct hermonprm_uar_params_st {       /* Little Endian */
+    pseudo_bit_t       reserved0[0x00040];
+/* -------------- */
+    pseudo_bit_t       uar_page_sz[0x00008];  /* This field defines the size of each UAR page.
+                                                 Size of each UAR page is 4KB * 2^uar_page_sz */
+    pseudo_bit_t       log_max_uars[0x00004]; /* Number of UARs supported is 2^log_max_UARs */
+    pseudo_bit_t       reserved1[0x00014];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x000a0];
+/* -------------- */
+}; 
+
+/* Translation and Protection Tables Parameters */
+
+struct hermonprm_tptparams_st {        /* Little Endian */
+    pseudo_bit_t       dmpt_base_adr_h[0x00020];/* dMPT - Memory Protection Table base physical address [63:32].
+                                                 Entry size is 64 bytes.
+                                                 Table must be aligned to its size.
+                                                 Address may be set to 0xFFFFFFFF if address translation and protection is not supported. */
+/* -------------- */
+    pseudo_bit_t       dmpt_base_adr_l[0x00020];/* dMPT - Memory Protection Table base physical address [31:0].
+                                                 Entry size is 64 bytes.
+                                                 Table must be aligned to its size.
+                                                 Address may be set to 0xFFFFFFFF if address translation and protection is not supported. */
+/* -------------- */
+    pseudo_bit_t       log_dmpt_sz[0x00006];  /* Log (base 2) of the number of region/windows entries in the dMPT table. */
+    pseudo_bit_t       reserved0[0x00002];
+    pseudo_bit_t       pfto[0x00005];         /* Page Fault RNR Timeout -
+                                                 The field returned in RNR NAKs generated when a page fault is detected.
+                                                 It has no effect when on-demand-paging is not used. */
+    pseudo_bit_t       reserved1[0x00013];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00020];
+/* -------------- */
+    pseudo_bit_t       mtt_base_addr_h[0x00020];/* MTT - Memory Translation table base physical address [63:32].
+                                                 Table must be aligned to its size.
+                                                 Address may be set to 0xFFFFFFFF if address translation and protection is not supported. */
+/* -------------- */
+    pseudo_bit_t       mtt_base_addr_l[0x00020];/* MTT - Memory Translation table base physical address [31:0].
+                                                 Table must be aligned to its size.
+                                                 Address may be set to 0xFFFFFFFF if address translation and protection is not supported. */
+/* -------------- */
+    pseudo_bit_t       cmpt_base_adr_h[0x00020];/* cMPT - Memory Protection Table base physical address [63:32].
+                                                 Entry size is 64 bytes.
+                                                 Table must be aligned to its size. */
+/* -------------- */
+    pseudo_bit_t       cmpt_base_adr_l[0x00020];/* cMPT - Memory Protection Table base physical address [31:0].
+                                                 Entry size is 64 bytes.
+                                                 Table must be aligned to its size. */
+/* -------------- */
+}; 
+
+/* Multicast Support Parameters   #### michal - match PRM */
+
+struct hermonprm_multicastparam_st {   /* Little Endian */
+    pseudo_bit_t       mc_base_addr_h[0x00020];/* Base Address of the Multicast Table [63:32].
+                                                 The base address must be aligned to the entry size.
+                                                 Address may be set to 0xFFFFFFFF if multicast is not supported. */
+/* -------------- */
+    pseudo_bit_t       mc_base_addr_l[0x00020];/* Base Address of the Multicast Table [31:0].
+                                                 The base address must be aligned to the entry size.
+                                                 Address may be set to 0xFFFFFFFF if multicast is not supported. */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00040];
+/* -------------- */
+    pseudo_bit_t       log_mc_table_entry_sz[0x00005];/* Log2 of the Size of multicast group member (MGM) entry.
+                                                 Must be greater than 5 (to allow CTRL and GID sections).
+                                                 That implies the number of QPs per MC table entry. */
+    pseudo_bit_t       reserved1[0x0000b];
+    pseudo_bit_t       reserved2[0x00010];
+/* -------------- */
+    pseudo_bit_t       log_mc_table_hash_sz[0x00005];/* Number of entries in multicast DGID hash table (must be power of 2)
+                                                 INIT_HCA - the required number of entries
+                                                 QUERY_HCA - the actual number of entries assigned by firmware (will be less than or equal to the amount required in INIT_HCA) */
+    pseudo_bit_t       reserved3[0x0001b];
+/* -------------- */
+    pseudo_bit_t       log_mc_table_sz[0x00005];/* Log2 of the overall number of MC entries in the MCG table (includes both hash and auxiliary tables) */
+    pseudo_bit_t       reserved4[0x00013];
+    pseudo_bit_t       mc_hash_fn[0x00003];   /* Multicast hash function
+                                                 0 - Default hash function
+                                                 other - reserved */
+    pseudo_bit_t       reserved5[0x00005];
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00020];
+/* -------------- */
+}; 
+
+/* QPC/EEC/CQC/EQC/RDB Parameters   #### michal - doesn't match PRM (field names differ; see below) */
+
+struct hermonprm_qpcbaseaddr_st {      /* Little Endian */
+    pseudo_bit_t       reserved0[0x00080];
+/* -------------- */
+    pseudo_bit_t       qpc_base_addr_h[0x00020];/* QPC Base Address [63:32]
+                                                 Table must be aligned on its size */
+/* -------------- */
+    pseudo_bit_t       log_num_of_qp[0x00005];/* Log base 2 of number of supported QPs */
+    pseudo_bit_t       qpc_base_addr_l[0x0001b];/* QPC Base Address [31:7]
+                                                 Table must be aligned on its size */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00040];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00040];
+/* -------------- */
+    pseudo_bit_t       srqc_base_addr_h[0x00020];/* SRQ Context Base Address [63:32]
+                                                 Table must be aligned on its size
+                                                 Address may be set to 0xFFFFFFFF if SRQ is not supported. */
+/* -------------- */
+    pseudo_bit_t       log_num_of_srq[0x00005];/* Log base 2 of number of supported SRQs. */
+    pseudo_bit_t       srqc_base_addr_l[0x0001b];/* SRQ Context Base Address [31:5]
+                                                 Table must be aligned on its size
+                                                 Address may be set to 0xFFFFFFFF if SRQ is not supported. */
+/* -------------- */
+    pseudo_bit_t       cqc_base_addr_h[0x00020];/* CQC Base Address [63:32]
+                                                 Table must be aligned on its size */
+/* -------------- */
+    pseudo_bit_t       log_num_of_cq[0x00005];/* Log base 2 of number of supported CQs. */
+    pseudo_bit_t       cqc_base_addr_l[0x0001b];/* CQC Base Address [31:6]
+                                                 Table must be aligned on its size */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00040];
+/* -------------- */
+    pseudo_bit_t       altc_base_addr_h[0x00020];/* AltC Base Address (altc_base_addr_h) [63:32]
+                                                 Table has same number of entries as QPC table.
+                                                 Table must be aligned to entry size. */
+/* -------------- */
+    pseudo_bit_t       altc_base_addr_l[0x00020];/* AltC Base Address (altc_base_addr_l) [31:0]
+                                                 Table has same number of entries as QPC table.
+                                                 Table must be aligned to entry size. */
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00040];
+/* -------------- */
+    pseudo_bit_t       auxc_base_addr_h[0x00020];/* AuxC Base Address [63:32] */
+/* -------------- */
+    pseudo_bit_t       auxc_base_addr_l[0x00020];/* AuxC Base Address [31:0] */
+/* -------------- */
+    pseudo_bit_t       reserved5[0x00040];
+/* -------------- */
+    pseudo_bit_t       eqc_base_addr_h[0x00020];/* EQC Base Address [63:32]
+                                                 Address may be set to 0xFFFFFFFF if EQs are not supported.
+                                                 Table must be aligned to entry size. */
+/* -------------- */
+    pseudo_bit_t       log_num_of_eq[0x00005];/* Log base 2 of number of supported EQs.
+                                                 Must be 6 or less in InfiniHost-III-EX.
+                                                 (NOTE(review): "InfiniHost-III-EX" looks carried over from an
+                                                 older PRM; confirm the limit against the Hermon/ConnectX PRM.) */
+    pseudo_bit_t       eqc_base_addr_l[0x0001b];/* EQC Base Address [31:6]
+                                                 Address may be set to 0xFFFFFFFF if EQs are not supported.
+                                                 Table must be aligned to entry size. */
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00040];
+/* -------------- */
+    pseudo_bit_t       rdmardc_base_addr_h[0x00020];/* rdmardc_base_addr_h: Base address of table that holds remote read and remote atomic requests [63:32]. */
+/* -------------- */
+    pseudo_bit_t       log_num_rd[0x00003];   /* Log (base 2) of the maximum number of RdmaRdC entries per QP. This denotes the maximum number of outstanding reads/atomics as a responder. */
+    pseudo_bit_t       reserved7[0x00002];
+    pseudo_bit_t       rdmardc_base_addr_l[0x0001b];/* rdmardc_base_addr_l: Base address of table that holds remote read and remote atomic requests [31:0]. 
+                                                 Table must be aligned to RDB entry size (32 bytes). */
+/* -------------- */
+    pseudo_bit_t       reserved8[0x00040];
+/* -------------- */
+}; 
+
+/* Header_Log_Register */
+
+struct hermonprm_header_log_register_st {      /* Little Endian */
+    pseudo_bit_t       place_holder[0x00020]; /* NOTE(review): generated placeholder; the actual header-log
+                                                 layout is not captured by the PRM extractor - consult the PRM
+                                                 before using this register. */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00060];
+/* -------------- */
+}; 
+
+/* Performance Monitors */
+
+struct hermonprm_performance_monitors_st {     /* Little Endian */
+    pseudo_bit_t       e0[0x00001];           /* Enables counting of respective performance counter */
+    pseudo_bit_t       e1[0x00001];           /* Enables counting of respective performance counter */
+    pseudo_bit_t       e2[0x00001];           /* Enables counting of respective performance counter */
+    pseudo_bit_t       reserved0[0x00001];
+    pseudo_bit_t       r0[0x00001];           /* If written to as '1 - resets respective performance counter, if written to as '0 - no change to matter */
+    pseudo_bit_t       r1[0x00001];           /* If written to as '1 - resets respective performance counter, if written to as '0 - no change to matter */
+    pseudo_bit_t       r2[0x00001];           /* If written to as '1 - resets respective performance counter, if written to as '0 - no change to matter */
+    pseudo_bit_t       reserved1[0x00001];
+    pseudo_bit_t       i0[0x00001];           /* Interrupt enable on respective counter overflow. '1 - interrupt enabled, '0 - interrupt disabled. */
+    pseudo_bit_t       i1[0x00001];           /* Interrupt enable on respective counter overflow. '1 - interrupt enabled, '0 - interrupt disabled. */
+    pseudo_bit_t       i2[0x00001];           /* Interrupt enable on respective counter overflow. '1 - interrupt enabled, '0 - interrupt disabled. */
+    pseudo_bit_t       reserved2[0x00001];
+    pseudo_bit_t       f0[0x00001];           /* Overflow flag. If set, overflow occurred on respective counter. Cleared if written to as '1 */
+    pseudo_bit_t       f1[0x00001];           /* Overflow flag. If set, overflow occurred on respective counter. Cleared if written to as '1 */
+    pseudo_bit_t       f2[0x00001];           /* Overflow flag. If set, overflow occurred on respective counter. Cleared if written to as '1 */
+    pseudo_bit_t       reserved3[0x00001];
+    pseudo_bit_t       ev_cnt1[0x00005];      /* Specifies event to be counted by Event_counter1 See XXX for events' definition. */
+    pseudo_bit_t       reserved4[0x00003];
+    pseudo_bit_t       ev_cnt2[0x00005];      /* Specifies event to be counted by Event_counter2 See XXX for events' definition. */
+    pseudo_bit_t       reserved5[0x00003];
+/* -------------- */
+    pseudo_bit_t       clock_counter[0x00020];/* Free-running clock counter */
+/* -------------- */
+    pseudo_bit_t       event_counter1[0x00020];
+/* -------------- */
+    pseudo_bit_t       event_counter2[0x00020];/* Read/write event counters, counting events specified by EvCnt1 and EvCnt2 fields respectively. When the event counter reaches its maximum value of 0xFFFFFF, the next event will cause it to roll over to zero, set F1 or F2 bit respectively and generate interrupt by I1 I2 bit respectively. */
+/* -------------- */
+}; 
+
+/* MLX WQE segment format */
+
+struct hermonprm_wqe_segment_ctrl_mlx_st {     /* Little Endian */
+    pseudo_bit_t       opcode[0x00005];       /* must be 0xA = SEND */
+    pseudo_bit_t       reserved0[0x0001a];
+    pseudo_bit_t       owner[0x00001];        /* WQE ownership bit (NOTE(review): presumably toggled between HW/SW on each queue wrap - confirm against PRM) */
+/* -------------- */
+    pseudo_bit_t       ds[0x00006];           /* Descriptor Size */
+    pseudo_bit_t       reserved1[0x0001a];
+/* -------------- */
+    pseudo_bit_t       fl[0x00001];           /* Force LoopBack */
+    pseudo_bit_t       reserved2[0x00001];
+    pseudo_bit_t       c[0x00002];            /* Create CQE (for "requested signalling" QP) */
+    pseudo_bit_t       icrc[0x00001];         /* last dword of the packet: 0 - Calculate ICRC and put it instead of last dword. 1 - Leave last dword as is. */
+    pseudo_bit_t       reserved3[0x00003];
+    pseudo_bit_t       sl[0x00004];           /* Service Level */
+    pseudo_bit_t       max_statrate[0x00004]; /* Maximum static rate control */
+    pseudo_bit_t       slr[0x00001];          /* 0= take slid from port. 1= take slid from given headers */
+    pseudo_bit_t       v15[0x00001];          /* Send packet over VL15 */
+    pseudo_bit_t       reserved4[0x0000e];
+/* -------------- */
+    pseudo_bit_t       reserved5[0x00010];
+    pseudo_bit_t       rlid[0x00010];         /* Destination LID (must match given headers) */
+/* -------------- */
+}; 
+
+/* Send WQE segment format */
+
+struct hermonprm_send_wqe_segment_st { /* Little Endian */
+    /* NOTE(review): the segments below appear to describe the possible
+       components of a send WQE; actual segment presence and ordering
+       depend on QP type and opcode - confirm offsets against the PRM. */
+    struct hermonprm_wqe_segment_ctrl_send_st  wqe_segment_ctrl_send;/* Send wqe segment ctrl */
+/* -------------- */
+    struct hermonprm_wqe_segment_rd_st wqe_segment_rd;/* Send wqe segment rd */
+/* -------------- */
+    struct hermonprm_wqe_segment_ud_st wqe_segment_ud;/* Send wqe segment ud */
+/* -------------- */
+    struct hermonprm_wqe_segment_bind_st       wqe_segment_bind;/* Send wqe segment bind */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00180];
+/* -------------- */
+    struct hermonprm_wqe_segment_remote_address_st     wqe_segment_remote_address;/* Send wqe segment remote address */
+/* -------------- */
+    struct hermonprm_wqe_segment_atomic_st     wqe_segment_atomic;/* Send wqe segment atomic */
+/* -------------- */
+    struct hermonprm_fast_registration_segment_st      fast_registration_segment;/* Fast Registration Segment */
+/* -------------- */
+    struct hermonprm_local_invalidate_segment_st       local_invalidate_segment;/* local invalidate segment */
+/* -------------- */
+    struct hermonprm_wqe_segment_data_ptr_st   wqe_segment_data_ptr;/* Send wqe segment data ptr */
+/* -------------- */
+    struct hermonprm_wqe_segment_data_inline_st        wqe_segment_data_inline;/* Send wqe segment data inline */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00200];
+/* -------------- */
+}; 
+
+/* QP and EE Context Entry */
+
+struct hermonprm_queue_pair_ee_context_entry_st {      /* Little Endian */
+    pseudo_bit_t       reserved0[0x00008];
+    pseudo_bit_t       reserved1[0x00001];
+    pseudo_bit_t       reserved2[0x00002];
+    pseudo_bit_t       pm_state[0x00002];     /* Path migration state (Migrated, Armed or Rearm)
+                                                 11-Migrated
+                                                 00-Armed
+                                                 01-Rearm
+                                                 10-Reserved
+                                                 Should be set to 11 for UD QPs and for QPs which do not support APM */
+    pseudo_bit_t       reserved3[0x00003];
+    pseudo_bit_t       st[0x00004];           /* Transport Service Type: RC: 0, UC: 1, RD: 2, UD: 3, FCMND:4, FEXCH:5, SRC:6, MLX 7, Raw Eth 11 */
+    pseudo_bit_t       reserved4[0x00008];
+    pseudo_bit_t       state[0x00004];        /* QP/EE state:
+                                                 0 - RST
+                                                 1 - INIT
+                                                 2 - RTR
+                                                 3 - RTS
+                                                 4 - SQEr
+                                                 5 - SQD (Send Queue Drained)
+                                                 6 - ERR
+                                                 7 - Send Queue Draining
+                                                 8 - Reserved
+                                                 9 - Suspended
+                                                 A- F - Reserved
+                                                 (Valid for QUERY_QPEE and ERR2RST_QPEE commands only) */
+/* -------------- */
+    pseudo_bit_t       pd[0x00018];           /* Protection Domain */
+    pseudo_bit_t       reserved5[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00004];
+    pseudo_bit_t       rlky[0x00001];         /* When set this QP can use the Reserved L_Key */
+    pseudo_bit_t       reserved7[0x00003];
+    pseudo_bit_t       log_sq_stride[0x00003];/* Stride on the send queue. WQ entry is 16*(2^log_SQ_stride) bytes.
+                                                 Stride must be equal or bigger than 64 bytes (minimum log_SQ_stride value allowed is 2). */
+    pseudo_bit_t       log_sq_size[0x00004];  /* Log2 of the Number of WQEs in the Send Queue. */
+    pseudo_bit_t       reserved8[0x00001];
+    pseudo_bit_t       log_rq_stride[0x00003];/* Stride on the receive queue. WQ entry is 16*(2^log_RQ_stride) bytes.
+                                                 Stride must be equal or bigger than 64 bytes (minimum log_RQ_stride value allowed is 2). */
+    pseudo_bit_t       log_rq_size[0x00004];  /* Log2 of the Number of WQEs in the Receive Queue. */
+    pseudo_bit_t       reserved9[0x00001];
+    pseudo_bit_t       msg_max[0x00005];      /* Max message size allowed on the QP. Maximum message size is 2^msg_Max.
+                                                 Must be equal to MTU for UD and MLX QPs. */
+    pseudo_bit_t       mtu[0x00003];          /* MTU of the QP (Must be the same for both paths: primary and alternative):
+                                                 0x1 - 256 bytes
+                                                 0x2 - 512
+                                                 0x3 - 1024
+                                                 0x4 - 2048
+                                                 other - reserved
+                                                 
+                                                 Should be configured to 0x4 for UD and MLX QPs. */
+/* -------------- */
+    pseudo_bit_t       usr_page[0x00018];     /* UAR number to ring doorbells for this QP (aliased to doorbell and Blue Flame pages) */
+    pseudo_bit_t       reserved10[0x00008];
+/* -------------- */
+    pseudo_bit_t       local_qpn_een[0x00018];/* Local QP/EE number. Lower bits determine position of this record in QPC table, and are therefore constrained.
+                                                 This field is valid for QUERY and ERR2RST commands only. */
+    pseudo_bit_t       reserved11[0x00008];
+/* -------------- */
+    pseudo_bit_t       remote_qpn_een[0x00018];/* Remote QP/EE number */
+    pseudo_bit_t       reserved12[0x00008];
+/* -------------- */
+    struct hermonprm_address_path_st   primary_address_path;/* Primary address path for the QP/EE */
+/* -------------- */
+    struct hermonprm_address_path_st   alternative_address_path;/* Alternate address path for the QP/EE */
+/* -------------- */
+    pseudo_bit_t       reserved13[0x00003];
+    pseudo_bit_t       reserved14[0x00001];
+    pseudo_bit_t       reserved15[0x00001];
+    pseudo_bit_t       cur_retry_cnt[0x00003];/* Current transport retry counter (QUERY_QPEE only).
+                                                 The current transport retry counter can vary from retry_count down to 1, where 1 means that the last retry attempt is currently executing. */
+    pseudo_bit_t       cur_rnr_retry[0x00003];/* Current RNR retry counter (QUERY_QPEE only).
+                                                 The current RNR retry counter can vary from rnr_retry to 1, where 1 means that the last retry attempt is currently executing. */
+    pseudo_bit_t       fre[0x00001];          /* Fast Registration Work Request Enabled. (Reserved for EE) */
+    pseudo_bit_t       reserved16[0x00001];
+    pseudo_bit_t       rnr_retry[0x00003];    /* RNR retry count */
+    pseudo_bit_t       retry_count[0x00003];  /* Transport timeout Retry count */
+    pseudo_bit_t       reserved17[0x00002];
+    pseudo_bit_t       sra_max[0x00003];      /* Maximum number of outstanding RDMA-read/Atomic operations allowed in the send queue. Maximum number is 2^SRA_Max. Must be zero in EE context. */
+    pseudo_bit_t       reserved18[0x00004];
+    pseudo_bit_t       ack_req_freq[0x00004]; /* ACK required frequency. ACK required bit will be set in every 2^AckReqFreq packets at least. Not valid for RD QP. */
+/* -------------- */
+    pseudo_bit_t       reserved19[0x00020];
+/* -------------- */
+    pseudo_bit_t       next_send_psn[0x00018];/* Next PSN to be sent */
+    pseudo_bit_t       reserved20[0x00008];
+/* -------------- */
+    pseudo_bit_t       cqn_snd[0x00018];      /* CQ number completions from the send queue to be reported to. Not valid (reserved) in EE context. */
+    pseudo_bit_t       reserved21[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved22[0x00040];
+/* -------------- */
+    pseudo_bit_t       last_acked_psn[0x00018];/* The last acknowledged PSN for the requester (QUERY_QPEE only) */
+    pseudo_bit_t       reserved23[0x00008];
+/* -------------- */
+    pseudo_bit_t       ssn[0x00018];          /* Requester Send Sequence Number (QUERY_QPEE only) */
+    pseudo_bit_t       reserved24[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved25[0x00004];
+    pseudo_bit_t       ric[0x00001];          /* Invalid Credits. 
+                                                 1 - place "Invalid Credits" to ACKs sent from this queue.
+                                                 0 - ACKs report the actual number of end to end credits on the connection.
+                                                 Not valid (reserved) in EE context.
+                                                 Must be set to 1 on QPs which are attached to SRQ. */
+    pseudo_bit_t       reserved26[0x00001];
+    pseudo_bit_t       page_offset[0x00006];  /* start address of wqes in first page (11:6), bits [5:0] reserved */
+    pseudo_bit_t       reserved27[0x00001];
+    pseudo_bit_t       rae[0x00001];          /* If set - Atomic operations enabled. on receive queue. Not valid (reserved) in EE context. */
+    pseudo_bit_t       rwe[0x00001];          /* If set - RDMA - write enabled on receive queue. Not valid (reserved) in EE context. */
+    pseudo_bit_t       rre[0x00001];          /* If set - RDMA - read enabled on receive queue. Not valid (reserved) in EE context. */
+    pseudo_bit_t       reserved28[0x00005];
+    pseudo_bit_t       rra_max[0x00003];      /* Maximum number of outstanding RDMA-read/Atomic operations allowed on receive queue is 2^RRA_Max. 
+                                                 Must be 0 for EE context. */
+    pseudo_bit_t       reserved29[0x00008];
+/* -------------- */
+    pseudo_bit_t       next_rcv_psn[0x00018]; /* Next (expected) PSN on receive */
+    pseudo_bit_t       min_rnr_nak[0x00005];  /* Minimum RNR NAK timer value (TTTTT field encoding according to the IB spec Vol1 9.7.5.2.8). 
+                                                 Not valid (reserved) in EE context. */
+    pseudo_bit_t       reserved30[0x00003];
+/* -------------- */
+    pseudo_bit_t       srcd[0x00010];         /* Scalable Reliable Connection Domain. Valid for SRC transport service */
+    pseudo_bit_t       reserved31[0x00010];
+/* -------------- */
+    pseudo_bit_t       cqn_rcv[0x00018];      /* CQ number completions from receive queue to be reported to. Not valid (reserved) in EE context. */
+    pseudo_bit_t       reserved32[0x00008];
+/* -------------- */
+    pseudo_bit_t       db_record_addr_h[0x00020];/* QP DB Record physical address [63:32] */
+/* -------------- */
+    pseudo_bit_t       reserved33[0x00002];
+    pseudo_bit_t       db_record_addr_l[0x0001e];/* QP DB Record physical address [31:2] */
+/* -------------- */
+    pseudo_bit_t       q_key[0x00020];        /* Q_Key to be validated against received datagrams.
+                                                 On send datagrams, if Q_Key[31] specified in the WQE is set, then this Q_Key will be transmitted in the outgoing message.
+                                                 Not valid (reserved) in EE context. */
+/* -------------- */
+    pseudo_bit_t       srqn[0x00018];         /* SRQN - Shared Receive Queue Number - specifies the SRQ number from which the QP dequeues receive descriptors. 
+                                                 SRQN is valid only if SRQ bit is set. Not valid (reserved) in EE context. */
+    pseudo_bit_t       srq[0x00001];          /* SRQ - Shared Receive Queue. If this bit is set, then the QP is associated with a SRQ. Not valid (reserved) in EE context. */
+    pseudo_bit_t       reserved34[0x00007];
+/* -------------- */
+    pseudo_bit_t       rmsn[0x00018];         /* Responder current message sequence number (QUERY_QPEE only) */
+    pseudo_bit_t       reserved35[0x00008];
+/* -------------- */
+    pseudo_bit_t       sq_wqe_counter[0x00010];/* A 16-bit counter that is incremented for each WQE posted to the SQ.
+                                                 Must be 0x0 in SQ initialization.
+                                                 (QUERY_QPEE only). */
+    pseudo_bit_t       rq_wqe_counter[0x00010];/* A 16-bit counter that is incremented for each WQE posted to the RQ.
+                                                 Must be 0x0 in RQ initialization.
+                                                 (QUERY_QPEE only). */
+/* -------------- */
+    pseudo_bit_t       reserved36[0x00040];
+/* -------------- */
+    pseudo_bit_t       rmc_parent_qpn[0x00018];/* reliable multicast parent queue number */
+    pseudo_bit_t       hs[0x00001];           /* Header Separation. If set, the byte count of the first scatter entry will be ignored. The buffer specified by the first scatter entry will contain packet headers (up to TCP). CQE will report number of bytes scattered to the first scatter entry. Intended for use on IPoverIB on UD QP or Raw Ethernet QP. */
+    pseudo_bit_t       is[0x00001];           /* when set - inline scatter is enabled for this RQ */
+    pseudo_bit_t       reserved37[0x00001];
+    pseudo_bit_t       rme[0x00002];          /* Reliable Multicast
+                                                 00 - disabled
+                                                 01 - parent QP (requester)
+                                                 10 - child QP (requester)
+                                                 11 - responder QP
+                                                 Note that Reliable Multicast is a preliminary definition which can be subject to change. */
+    pseudo_bit_t       reserved38[0x00002];
+    pseudo_bit_t       mkey_rmp[0x00001];     /* If set, MKey used to access TPT for incoming RDMA-write request is calculated by adding MKey from the packet to base_MKey field in the QPC. Can be set only for QPs that are not target for RDMA-read request. */
+/* -------------- */
+    pseudo_bit_t       base_mkey[0x00018];    /* Base Mkey bits [31:8]. Lower 8 bits must be zero. */
+    pseudo_bit_t       num_rmc_peers[0x00008];/* Number of remote peers in Reliable Multicast group */
+/* -------------- */
+    pseudo_bit_t       mtt_base_addr_h[0x00008];/* MTT Base Address [39:32] in ICM relative to INIT_HCA.mtt_base_addr */
+    pseudo_bit_t       reserved39[0x00010];
+    pseudo_bit_t       log2_page_size[0x00006];/* Log (base 2) of MTT page size in units of 4KByte */
+    pseudo_bit_t       reserved40[0x00002];
+/* -------------- */
+    pseudo_bit_t       reserved41[0x00003];
+    pseudo_bit_t       mtt_base_addr_l[0x0001d];/* MTT Base Address [31:3] in ICM relative to INIT_HCA.mtt_base_addr */
+/* -------------- */
+    pseudo_bit_t       vft_lan[0x0000c];
+    pseudo_bit_t       vft_prio[0x00003];     /* The Priority field in the VFT header for FCP */
+    pseudo_bit_t       reserved42[0x00001];
+    pseudo_bit_t       cs_ctl[0x00009];       /* The Priority field in the VFT header for FCP
+                                                 (NOTE(review): comment appears duplicated from vft_prio in the
+                                                 generated source; presumably this is the FC CS_CTL field -
+                                                 confirm against PRM) */
+    pseudo_bit_t       reserved43[0x00006];
+    pseudo_bit_t       ve[0x00001];           /* Should we add/check the VFT header */
+/* -------------- */
+    pseudo_bit_t       exch_base[0x00010];    /* For init QP only - The base exchanges */
+    pseudo_bit_t       reserved44[0x00008];
+    pseudo_bit_t       exch_size[0x00004];    /* For CMMD QP only - The size (from base) exchanges is 2^exch_size */
+    pseudo_bit_t       reserved45[0x00003];
+    pseudo_bit_t       fc[0x00001];           /* When set it means that this QP is used for FIBRE CHANNEL. */
+/* -------------- */
+    pseudo_bit_t       remote_id[0x00018];    /* Peer NX port ID */
+    pseudo_bit_t       reserved46[0x00008];
+/* -------------- */
+    pseudo_bit_t       fcp_mtu[0x0000a];      /* In 4*Bytes units. The MTU Size */
+    pseudo_bit_t       reserved47[0x00006];
+    pseudo_bit_t       my_id_indx[0x00008];   /* Index to My NX port ID table */
+    pseudo_bit_t       vft_hop_count[0x00008];/* HopCnt value for the VFT header */
+/* -------------- */
+    pseudo_bit_t       reserved48[0x000c0];
+/* -------------- */
+}; 
+
+/*  */
+
+struct hermonprm_mcg_qp_dw_st {        /* Little Endian */
+    pseudo_bit_t       qpn[0x00018];          /* QP number of a multicast group member */
+    pseudo_bit_t       reserved0[0x00006];
+    pseudo_bit_t       blck_lb[0x00001];      /* NOTE(review): presumably "block loopback" for this QP - confirm against PRM */
+    pseudo_bit_t       reserved1[0x00001];
+/* -------------- */
+}; 
+
+/* Clear Interrupt [63:0]              #### michal - match to PRM */
+
+struct hermonprm_clr_int_st {  /* Little Endian */
+    /* NOTE(review): the comments below mention "InfiniHost-III-EX"; this text
+       looks carried over from an older PRM - confirm wording against the
+       Hermon/ConnectX PRM. */
+    pseudo_bit_t       clr_int_h[0x00020];    /* Clear Interrupt [63:32]
+                                                 Write transactions to this register will clear (de-assert) the virtual interrupt output pins of InfiniHost-III-EX. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. 
+                                                 This register is write-only. Reading from this register will cause undefined result
+                                                  */
+/* -------------- */
+    pseudo_bit_t       clr_int_l[0x00020];    /* Clear Interrupt [31:0]
+                                                 Write transactions to this register will clear (de-assert) the virtual interrupt output pins of InfiniHost-III-EX. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. 
+                                                 This register is write-only. Reading from this register will cause undefined result */
+/* -------------- */
+}; 
+
+/* EQ Set CI DBs Table */
+
+struct hermonprm_eq_set_ci_table_st {  /* Little Endian */
+    pseudo_bit_t       eq0_set_ci[0x00020];   /* EQ0_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq1_set_ci[0x00020];   /* EQ1_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq2_set_ci[0x00020];   /* EQ2_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq3_set_ci[0x00020];   /* EQ3_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq4_set_ci[0x00020];   /* EQ4_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq5_set_ci[0x00020];   /* EQ5_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved5[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq6_set_ci[0x00020];   /* EQ6_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq7_set_ci[0x00020];   /* EQ7_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved7[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq8_set_ci[0x00020];   /* EQ8_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved8[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq9_set_ci[0x00020];   /* EQ9_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved9[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq10_set_ci[0x00020];  /* EQ10_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved10[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq11_set_ci[0x00020];  /* EQ11_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved11[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq12_set_ci[0x00020];  /* EQ12_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved12[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq13_set_ci[0x00020];  /* EQ13_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved13[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq14_set_ci[0x00020];  /* EQ14_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved14[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq15_set_ci[0x00020];  /* EQ15_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved15[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq16_set_ci[0x00020];  /* EQ16_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved16[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq17_set_ci[0x00020];  /* EQ17_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved17[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq18_set_ci[0x00020];  /* EQ18_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved18[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq19_set_ci[0x00020];  /* EQ19_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved19[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq20_set_ci[0x00020];  /* EQ20_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved20[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq21_set_ci[0x00020];  /* EQ21_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved21[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq22_set_ci[0x00020];  /* EQ22_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved22[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq23_set_ci[0x00020];  /* EQ23_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved23[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq24_set_ci[0x00020];  /* EQ24_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved24[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq25_set_ci[0x00020];  /* EQ25_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved25[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq26_set_ci[0x00020];  /* EQ26_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved26[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq27_set_ci[0x00020];  /* EQ27_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved27[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq28_set_ci[0x00020];  /* EQ28_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved28[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq29_set_ci[0x00020];  /* EQ29_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved29[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq30_set_ci[0x00020];  /* EQ30_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved30[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq31_set_ci[0x00020];  /* EQ31_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved31[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq32_set_ci[0x00020];  /* EQ32_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved32[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq33_set_ci[0x00020];  /* EQ33_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved33[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq34_set_ci[0x00020];  /* EQ34_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved34[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq35_set_ci[0x00020];  /* EQ35_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved35[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq36_set_ci[0x00020];  /* EQ36_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved36[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq37_set_ci[0x00020];  /* EQ37_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved37[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq38_set_ci[0x00020];  /* EQ38_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved38[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq39_set_ci[0x00020];  /* EQ39_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved39[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq40_set_ci[0x00020];  /* EQ40_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved40[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq41_set_ci[0x00020];  /* EQ41_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved41[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq42_set_ci[0x00020];  /* EQ42_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved42[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq43_set_ci[0x00020];  /* EQ43_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved43[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq44_set_ci[0x00020];  /* EQ44_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved44[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq45_set_ci[0x00020];  /* EQ45_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved45[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq46_set_ci[0x00020];  /* EQ46_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved46[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq47_set_ci[0x00020];  /* EQ47_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved47[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq48_set_ci[0x00020];  /* EQ48_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved48[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq49_set_ci[0x00020];  /* EQ49_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved49[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq50_set_ci[0x00020];  /* EQ50_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved50[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq51_set_ci[0x00020];  /* EQ51_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved51[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq52_set_ci[0x00020];  /* EQ52_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved52[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq53_set_ci[0x00020];  /* EQ53_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved53[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq54_set_ci[0x00020];  /* EQ54_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved54[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq55_set_ci[0x00020];  /* EQ55_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved55[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq56_set_ci[0x00020];  /* EQ56_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved56[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq57_set_ci[0x00020];  /* EQ57_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved57[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq58_set_ci[0x00020];  /* EQ58_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved58[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq59_set_ci[0x00020];  /* EQ59_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved59[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq60_set_ci[0x00020];  /* EQ60_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved60[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq61_set_ci[0x00020];  /* EQ61_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved61[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq62_set_ci[0x00020];  /* EQ62_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved62[0x00020];
+/* -------------- */
+    pseudo_bit_t       eq63_set_ci[0x00020];  /* EQ63_Set_CI */
+/* -------------- */
+    pseudo_bit_t       reserved63[0x00020];
+/* -------------- */
+}; 
+
+/* Hermon (ConnectX) Configuration Registers     #### michal - match to PRM */
+
+struct hermonprm_configuration_registers_st {  /* Little Endian */
+    pseudo_bit_t       reserved0[0x403400];   /* 0x403400 bits = byte offset 0x80680: location of the HCR within the BAR -- NOTE(review): confirm offset against the PRM */
+/* -------------- */
+    struct hermonprm_hca_command_register_st   hca_command_interface_register;/* HCA Command Register */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x3fcb20];   /* padding out to the end of the mapped register space */
+/* -------------- */
+}; 
+
+/* QP_DB_Record         ### michal = gdror fixed */
+
+struct hermonprm_qp_db_record_st {     /* Little Endian */
+    pseudo_bit_t       receive_wqe_counter[0x00010];/* Modulo-64K counter of WQEs posted to the QP since its creation. Should be initialized to zero. */
+    pseudo_bit_t       reserved0[0x00010];    /* padding to one 32-bit dword */
+/* -------------- */
+}; 
+
+/* CQ_ARM_DB_Record */
+
+struct hermonprm_cq_arm_db_record_st { /* Little Endian */
+    pseudo_bit_t       counter[0x00020];      /* CQ counter for the arming request */
+/* -------------- */
+    pseudo_bit_t       cmd[0x00003];          /* 0x0 - No command
+                                                 0x1 - Request notification for next Solicited completion event. Counter field specifies the current CQ Consumer Counter.
+                                                 0x2 - Request notification for next Solicited or Unsolicited completion event. Counter field specifies the current CQ Consumer counter.
+                                                 0x3 - Request notification for multiple completions (Arm-N). Counter field specifies the value of the CQ Index that when reached by HW (i.e. HW generates a CQE into this Index) Event will be generated
+                                                 Other - Reserved */
+    pseudo_bit_t       cmd_sn[0x00002];       /* Command Sequence Number - See Table 35, "CQ Doorbell Layout" for definition of this field */
+    pseudo_bit_t       res[0x00003];          /* Must be 0x2 */
+    pseudo_bit_t       cq_number[0x00018];    /* CQ number */
+/* -------------- */
+}; 
+
+/* CQ_CI_DB_Record */
+
+struct hermonprm_cq_ci_db_record_st {  /* Little Endian */
+    /* CQ consumer-index update doorbell record; res must be 0x1 here (vs. 0x2 in the arm record) */
+    pseudo_bit_t       counter[0x00020];      /* CQ counter */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00005];
+    pseudo_bit_t       res[0x00003];          /* Must be 0x1 */
+    pseudo_bit_t       cq_number[0x00018];    /* CQ number */
+/* -------------- */
+}; 
+
+/* Virtual_Physical_Mapping */
+
+struct hermonprm_virtual_physical_mapping_st { /* Little Endian */
+    /* One mailbox entry mapping a virtually-contiguous range to a physically-contiguous
+       range of 2^log2size 4KB pages; the VA fields are meaningful only for MAP_ICM */
+    pseudo_bit_t       va_h[0x00020];         /* Virtual Address[63:32]. Valid only for MAP_ICM command. */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x0000c];
+    pseudo_bit_t       va_l[0x00014];         /* Virtual Address[31:12]. Valid only for MAP_ICM command. */
+/* -------------- */
+    pseudo_bit_t       pa_h[0x00020];         /* Physical Address[63:32] */
+/* -------------- */
+    pseudo_bit_t       log2size[0x00006];     /* Log2 of the size in 4KB pages of the physical and virtual contiguous memory that starts at PA_L/H and VA_L/H */
+    pseudo_bit_t       reserved1[0x00006];
+    pseudo_bit_t       pa_l[0x00014];         /* Physical Address[31:12] */
+/* -------------- */
+}; 
+
+/* MOD_STAT_CFG            #### michal - gdror fix */
+
+struct hermonprm_mod_stat_cfg_st {     /* Little Endian */
+    pseudo_bit_t       reserved0[0x00010];
+    pseudo_bit_t       rx_options[0x00004];   /* number of RX options to sweep when doing SerDes parameters AutoNegotiation. */
+    pseudo_bit_t       reserved1[0x00003];
+    pseudo_bit_t       rx_options_m[0x00001]; /* Modify rx_options */
+    pseudo_bit_t       tx_options[0x00004];   /* number of TX options to sweep when doing SerDes parameters AutoNegotiation. */
+    pseudo_bit_t       reserved2[0x00003];
+    pseudo_bit_t       tx_options_m[0x00001]; /* Modify tx_options */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00020];
+/* -------------- */
+    pseudo_bit_t       pre_amp[0x00004];      /* Pre Amplitude */
+    pseudo_bit_t       pre_emp_pre_amp[0x00004];/* presumably Pre Emphasis Pre Amplitude (SerDes tuning) - TODO confirm against PRM */
+    pseudo_bit_t       pre_emp_out[0x00004];  /* Pre Emphasis Out */
+    pseudo_bit_t       voltage[0x00004];      /* presumably SerDes drive voltage - TODO confirm against PRM */
+    pseudo_bit_t       equ[0x00004];          /* Equalization */
+    pseudo_bit_t       reserved4[0x0000b];
+    pseudo_bit_t       serdes_m[0x00001];     /* Modify serdes parameters */
+/* -------------- */
+    pseudo_bit_t       lid[0x00010];          /* default LID */
+    pseudo_bit_t       lid_m[0x00001];        /* Modify default LID */
+    pseudo_bit_t       reserved5[0x00003];
+    pseudo_bit_t       port_en[0x00001];      /* enable port (E_Key) */
+    pseudo_bit_t       port_en_m[0x00001];    /* Modify  port_en */
+    pseudo_bit_t       reserved6[0x0000a];
+/* -------------- */
+    pseudo_bit_t       reserved7[0x0001f];
+    pseudo_bit_t       guid_hi_m[0x00001];    /* Modify guid_hi */
+/* -------------- */
+    pseudo_bit_t       guid_hi[0x00020];      /* presumably the default port GUID[63:32]; applied when guid_hi_m is set - TODO confirm scope (port vs node) */
+/* -------------- */
+    pseudo_bit_t       reserved8[0x0001f];
+    pseudo_bit_t       guid_lo_m[0x00001];    /* Modify guid_lo */
+/* -------------- */
+    pseudo_bit_t       guid_lo[0x00020];      /* presumably the default port GUID[31:0]; applied when guid_lo_m is set */
+/* -------------- */
+    pseudo_bit_t       reserved9[0x0001f];
+    pseudo_bit_t       nodeguid_hi_m[0x00001];/* Modify nodeguid_hi */
+/* -------------- */
+    pseudo_bit_t       nodeguid_hi[0x00020];  /* Node GUID[63:32]; applied when nodeguid_hi_m is set */
+/* -------------- */
+    pseudo_bit_t       reserved10[0x0001f];
+    pseudo_bit_t       nodeguid_lo_m[0x00001];/* Modify nodeguid_lo */
+/* -------------- */
+    pseudo_bit_t       nodeguid_lo[0x00020];  /* Node GUID[31:0]; applied when nodeguid_lo_m is set */
+/* -------------- */
+    pseudo_bit_t       reserved11[0x00680];
+/* -------------- */
+}; 
+
+/* SRQ Context */
+
+struct hermonprm_srq_context_st {      /* Little Endian */
+    pseudo_bit_t       srqn[0x00018];         /* SRQ number */
+    pseudo_bit_t       log_srq_size[0x00004]; /* Log2 of the Number of WQEs in the Receive Queue.
+                                                 Maximum value is 0x10, i.e. 16M WQEs. */
+    pseudo_bit_t       state[0x00004];        /* SRQ State:
+                                                 1111 - SW Ownership
+                                                 0000 - HW Ownership
+                                                 0001 - Error
+                                                 Valid only on QUERY_SRQ and HW2SW_SRQ commands. */
+/* -------------- */
+    pseudo_bit_t       src_domain[0x00010];   /* The Scalable RC Domain. Messages coming to receive ports specifying this SRQ as receive queue will be served only if SRC_Domain of the SRQ matches SRC_Domain of the transport QP of this message. */
+    pseudo_bit_t       reserved0[0x00008];
+    pseudo_bit_t       log_srq_stride[0x00003];/* Stride (max WQE size) on the receive queue. WQ entry is 16*(2^log_RQ_stride) bytes. */
+    pseudo_bit_t       reserved1[0x00005];
+/* -------------- */
+    pseudo_bit_t       cqn[0x00018];          /* Completion Queue to report SRC messages directed to this SRQ. */
+    pseudo_bit_t       page_offset[0x00006];  /* The offset of the first WQE from the beginning of 4Kbyte page (Figure 52, "Work Queue Buffer Structure") */
+    pseudo_bit_t       reserved2[0x00002];
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00020];
+/* -------------- */
+    pseudo_bit_t       mtt_base_addr_h[0x00008];/* MTT Base Address [39:32] in ICM relative to INIT_HCA.mtt_base_addr */
+    pseudo_bit_t       reserved4[0x00010];
+    pseudo_bit_t       log2_page_size[0x00006];/* Log (base 2) of MTT page size in units of 4KByte */
+    pseudo_bit_t       reserved5[0x00002];
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00003];
+    pseudo_bit_t       mtt_base_addr_l[0x0001d];/* MTT Base Address [31:3] in ICM relative to INIT_HCA.mtt_base_addr */
+/* -------------- */
+    pseudo_bit_t       pd[0x00018];           /* SRQ protection domain */
+    pseudo_bit_t       reserved7[0x00008];
+/* -------------- */
+    pseudo_bit_t       wqe_cnt[0x00010];      /* WQE count on the SRQ. Valid only upon QUERY_SRQ and HW2SW_SRQ commands. */
+    pseudo_bit_t       lwm[0x00010];          /* Limit Water Mark - if the LWM is not zero, and the wqe_cnt drops below LWM when a WQE is dequeued from the SRQ, then an SRQ limit event is fired and the LWM is set to zero. Valid only upon QUERY_SRQ and HW2SW_SRQ commands. */
+/* -------------- */
+    pseudo_bit_t       srq_wqe_counter[0x00010];/* A 16-bit counter incremented for each WQE posted to the SRQ. Must be 0x0 in SRQ initialization. Valid only upon the QUERY_SRQ command. */
+    pseudo_bit_t       reserved8[0x00010];
+/* -------------- */
+    pseudo_bit_t       reserved9[0x00020];
+/* -------------- */
+    pseudo_bit_t       db_record_addr_h[0x00020];/* SRQ DB Record physical address [63:32] */
+/* -------------- */
+    pseudo_bit_t       reserved10[0x00002];
+    pseudo_bit_t       db_record_addr_l[0x0001e];/* SRQ DB Record physical address [31:2] */
+/* -------------- */
+}; 
+
+/* PBL */
+
+struct hermonprm_pbl_st {      /* Little Endian */
+    /* Physical Buffer List fragment: four consecutive 64-bit MTT entries, high dword first */
+    pseudo_bit_t       mtt_0_h[0x00020];      /* First MTT[63:32] */
+/* -------------- */
+    pseudo_bit_t       mtt_0_l[0x00020];      /* First MTT[31:0] */
+/* -------------- */
+    pseudo_bit_t       mtt_1_h[0x00020];      /* Second MTT[63:32] */
+/* -------------- */
+    pseudo_bit_t       mtt_1_l[0x00020];      /* Second MTT[31:0] */
+/* -------------- */
+    pseudo_bit_t       mtt_2_h[0x00020];      /* Third MTT[63:32] */
+/* -------------- */
+    pseudo_bit_t       mtt_2_l[0x00020];      /* Third MTT[31:0] */
+/* -------------- */
+    pseudo_bit_t       mtt_3_h[0x00020];      /* Fourth MTT[63:32] */
+/* -------------- */
+    pseudo_bit_t       mtt_3_l[0x00020];      /* Fourth MTT[31:0] */
+/* -------------- */
+}; 
+
+/* Performance Counters   #### michal - gdror fixed */
+
+struct hermonprm_performance_counters_st {     /* Little Endian */
+    /* All fields reserved in this generated revision; struct retained to fix the
+       mailbox size (0x800 bits total) and field offsets */
+    pseudo_bit_t       reserved0[0x00080];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00080];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00080];
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00060];
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00620];
+/* -------------- */
+}; 
+
+/* Transport and CI Error Counters */
+
+struct hermonprm_transport_and_ci_error_counters_st {  /* Little Endian */
+    pseudo_bit_t       rq_num_lle[0x00020];   /* Responder - number of local length errors */
+/* -------------- */
+    pseudo_bit_t       sq_num_lle[0x00020];   /* Requester - number of local length errors */
+/* -------------- */
+    pseudo_bit_t       rq_num_lqpoe[0x00020]; /* Responder - number local QP operation error */
+/* -------------- */
+    pseudo_bit_t       sq_num_lqpoe[0x00020]; /* Requester - number local QP operation error */
+/* -------------- */
+    pseudo_bit_t       rq_num_leeoe[0x00020]; /* Responder - number local EE operation error */
+/* -------------- */
+    pseudo_bit_t       sq_num_leeoe[0x00020]; /* Requester - number local EE operation error */
+/* -------------- */
+    pseudo_bit_t       rq_num_lpe[0x00020];   /* Responder - number of local protection errors */
+/* -------------- */
+    pseudo_bit_t       sq_num_lpe[0x00020];   /* Requester - number of local protection errors */
+/* -------------- */
+    pseudo_bit_t       rq_num_wrfe[0x00020];  /* Responder - number of CQEs with error. 
+                                                 Incremented each time a CQE with error is generated */
+/* -------------- */
+    pseudo_bit_t       sq_num_wrfe[0x00020];  /* Requester - number of CQEs with error. 
+                                                 Incremented each time a CQE with error is generated */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00020];
+/* -------------- */
+    pseudo_bit_t       sq_num_mwbe[0x00020];  /* Requester - number of memory window bind errors */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00020];
+/* -------------- */
+    pseudo_bit_t       sq_num_bre[0x00020];   /* Requester - number of bad response errors */
+/* -------------- */
+    pseudo_bit_t       rq_num_lae[0x00020];   /* Responder - number of local access errors */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00040];
+/* -------------- */
+    pseudo_bit_t       sq_num_rire[0x00020];  /* Requester - number of remote invalid request errors
+                                                 NAK-Invalid Request on:
+                                                 1. Unsupported OpCode: Responder detected an unsupported OpCode.
+                                                 2. Unexpected OpCode: Responder detected an error in the sequence of OpCodes, such
+                                                 as a missing "Last" packet.
+                                                 Note: there is no PSN error, thus this does not indicate a dropped packet. */
+/* -------------- */
+    pseudo_bit_t       rq_num_rire[0x00020];  /* Responder - number of remote invalid request errors.
+                                                 NAK may or may not be sent.
+                                                 1. QP Async Affiliated Error: Unsupported or Reserved OpCode (RC,RD only):
+                                                 Inbound request OpCode was either reserved, or was for a function not supported by this
+                                                 QP. (E.g. RDMA or ATOMIC on QP not set up for this).
+                                                 2. Misaligned ATOMIC: VA does not point to an aligned address on an atomic operation.
+                                                 3. Too many RDMA READ or ATOMIC Requests: There were more requests received
+                                                 and not ACKed than allowed for the connection.
+                                                 4. Out of Sequence OpCode, current packet is "First" or "Only": The Responder
+                                                 detected an error in the sequence of OpCodes; a missing "Last" packet
+                                                 5. Out of Sequence OpCode, current packet is not "First" or "Only": The Responder
+                                                 detected an error in the sequence of OpCodes; a missing "First" packet
+                                                 6. Local Length Error: Inbound "Send" request message exceeded the responder's available
+                                                 buffer space.
+                                                 7. Length error: RDMA WRITE request message contained too much or too little payload
+                                                 data compared to the DMA length advertised in the first or only packet.
+                                                 8. Length error: Payload length was not consistent with the opcode:
+                                                 a: 0 byte <= "only" <= PMTU bytes
+                                                 b: ("first" or "middle") == PMTU bytes
+                                                 c: 1byte <= "last" <= PMTU bytes
+                                                 9. Length error: Inbound message exceeded the size supported by the CA port. */
+/* -------------- */
+    pseudo_bit_t       sq_num_rae[0x00020];   /* Requester - number of remote access errors.
+                                                 NAK-Remote Access Error on:
+                                                 R_Key Violation: Responder detected an invalid R_Key while executing an RDMA
+                                                 Request. */
+/* -------------- */
+    pseudo_bit_t       rq_num_rae[0x00020];   /* Responder - number of remote access errors.
+                                                 R_Key Violation Responder detected an R_Key violation while executing an RDMA
+                                                 request.
+                                                 NAK may or may not be sent. */
+/* -------------- */
+    pseudo_bit_t       sq_num_roe[0x00020];   /* Requester - number of remote operation errors.
+                                                 NAK-Remote Operation Error on:
+                                                 Remote Operation Error: Responder encountered an error, (local to the responder),
+                                                 which prevented it from completing the request. */
+/* -------------- */
+    pseudo_bit_t       rq_num_roe[0x00020];   /* Responder - number of remote operation errors.
+                                                 NAK-Remote Operation Error on:
+                                                 1. Malformed WQE: Responder detected a malformed Receive Queue WQE while processing
+                                                 the packet.
+                                                 2. Remote Operation Error: Responder encountered an error, (local to the responder),
+                                                 which prevented it from completing the request. */
+/* -------------- */
+    pseudo_bit_t       sq_num_tree[0x00020];  /* Requester - number of transport retries exceeded errors */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00020];
+/* -------------- */
+    pseudo_bit_t       sq_num_rree[0x00020];  /* Requester - number of RNR nak retries exceeded errors */
+/* -------------- */
+    pseudo_bit_t       rq_num_rnr[0x00020];   /* Responder - the number of RNR Naks sent */
+/* -------------- */
+    pseudo_bit_t       sq_num_rnr[0x00020];   /* Requester - the number of RNR Naks received */
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00040];
+/* -------------- */
+    pseudo_bit_t       reserved5[0x00020];
+/* -------------- */
+    pseudo_bit_t       sq_num_rabrte[0x00020];/* Requester - number of remote aborted errors */
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00020];
+/* -------------- */
+    pseudo_bit_t       sq_num_ieecne[0x00020];/* Requester - number of invalid EE context number errors */
+/* -------------- */
+    pseudo_bit_t       reserved7[0x00020];
+/* -------------- */
+    pseudo_bit_t       sq_num_ieecse[0x00020];/* Requester - invalid EE context state errors */
+/* -------------- */
+    pseudo_bit_t       reserved8[0x00380];
+/* -------------- */
+    pseudo_bit_t       rq_num_oos[0x00020];   /* Responder - number of out of sequence requests received */
+/* -------------- */
+    pseudo_bit_t       sq_num_oos[0x00020];   /* Requester - number of out of sequence Naks received */
+/* -------------- */
+    pseudo_bit_t       rq_num_mce[0x00020];   /* Responder - number of bad multicast packets received */
+/* -------------- */
+    pseudo_bit_t       reserved9[0x00020];
+/* -------------- */
+    pseudo_bit_t       rq_num_rsync[0x00020]; /* Responder - number of RESYNC operations */
+/* -------------- */
+    pseudo_bit_t       sq_num_rsync[0x00020]; /* Requester - number of RESYNC operations */
+/* -------------- */
+    pseudo_bit_t       rq_num_udsdprd[0x00020];/* The number of UD packets silently discarded on the receive queue due to lack of receive descriptor. */
+/* -------------- */
+    pseudo_bit_t       reserved10[0x00020];
+/* -------------- */
+    pseudo_bit_t       rq_num_ucsdprd[0x00020];/* The number of UC packets silently discarded on the receive queue due to lack of receive descriptor. */
+/* -------------- */
+    pseudo_bit_t       reserved11[0x003e0];
+/* -------------- */
+    pseudo_bit_t       num_cqovf[0x00020];    /* Number of CQ overflows */
+/* -------------- */
+    pseudo_bit_t       num_eqovf[0x00020];    /* Number of EQ overflows */
+/* -------------- */
+    pseudo_bit_t       num_baddb[0x00020];    /* Number of bad doorbells */
+/* -------------- */
+    pseudo_bit_t       reserved12[0x002a0];
+/* -------------- */
+}; 
+
+/* Event_data Field - HCR Completion Event   #### michal - match PRM */
+
+struct hermonprm_hcr_completion_event_st {     /* Little Endian */
+    /* Event_data payload of the EQE generated when a command posted through the HCR completes */
+    pseudo_bit_t       token[0x00010];        /* HCR Token */
+    pseudo_bit_t       reserved0[0x00010];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00020];
+/* -------------- */
+    pseudo_bit_t       status[0x00008];       /* HCR Status */
+    pseudo_bit_t       reserved2[0x00018];
+/* -------------- */
+    pseudo_bit_t       out_param_h[0x00020];  /* HCR Output Parameter [63:32] */
+/* -------------- */
+    pseudo_bit_t       out_param_l[0x00020];  /* HCR Output Parameter [31:0] */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00020];
+/* -------------- */
+}; 
+
+/* Completion with Error CQE             #### michal - gdror fixed */
+
+struct hermonprm_completion_with_error_st {    /* Little Endian */
+    pseudo_bit_t       qpn[0x00018];          /* Indicates the QP for which completion is being reported */
+    pseudo_bit_t       reserved0[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x000a0];
+/* -------------- */
+    pseudo_bit_t       syndrome[0x00008];     /* Completion with error syndrome:
+                                                         0x01 - Local Length Error
+                                                         0x02 - Local QP Operation Error
+                                                         0x03 - Local EE Context Operation Error
+                                                         0x04 - Local Protection Error
+                                                         0x05 - Work Request Flushed Error 
+                                                         0x06 - Memory Window Bind Error
+                                                         0x10 - Bad Response Error
+                                                         0x11 - Local Access Error
+                                                         0x12 - Remote Invalid Request Error
+                                                         0x13 - Remote Access Error
+                                                         0x14 - Remote Operation Error
+                                                         0x15 - Transport Retry Counter Exceeded
+                                                         0x16 - RNR Retry Counter Exceeded
+                                                         0x20 - Local RDD Violation Error
+                                                         0x21 - Remote Invalid RD Request
+                                                         0x22 - Remote Aborted Error
+                                                         0x23 - Invalid EE Context Number
+                                                         0x24 - Invalid EE Context State
+                                                         other - Reserved
+                                                 Syndrome is defined according to the IB specification volume 1. For detailed explanation of the syndromes, refer to chapters 10-11 of the IB specification rev 1.1. */
+    pseudo_bit_t       vendor_error_syndrome[0x00008];/* vendor-specific error syndrome */
+    pseudo_bit_t       wqe_counter[0x00010];  /* presumably the counter/index of the WQE this error CQE refers to - TODO confirm against PRM */
+/* -------------- */
+    pseudo_bit_t       opcode[0x00005];       /* The opcode of WQE completion is reported for.
+                                                 
+                                                 The following values are reported in case of completion with error:
+                                                 0xFE - For completion with error on Receive Queues
+                                                 0xFF - For completion with error on Send Queues */
+    pseudo_bit_t       reserved2[0x00001];
+    pseudo_bit_t       s_r[0x00001];          /* send 1 / receive 0 */
+    pseudo_bit_t       owner[0x00001];        /* HW Flips this bit for every CQ wrap around. Initialized to Zero. */
+    pseudo_bit_t       reserved3[0x00018];
+/* -------------- */
+}; 
+
+/* Resize CQ Input Mailbox */
+
+struct hermonprm_resize_cq_st {        /* Little Endian */
+    pseudo_bit_t       reserved0[0x00040];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00006];
+    pseudo_bit_t       page_offset[0x00006];  /* presumably the offset of the first CQE within the first page, as in the CQ context - TODO confirm */
+    pseudo_bit_t       reserved2[0x00014];
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00018];
+    pseudo_bit_t       log_cq_size[0x00005];  /* Log (base 2) of the CQ size (in entries) */
+    pseudo_bit_t       reserved4[0x00003];
+/* -------------- */
+    pseudo_bit_t       reserved5[0x00020];
+/* -------------- */
+    pseudo_bit_t       mtt_base_addr_h[0x00008];/* MTT Base Address [39:32] in ICM - presumably relative to INIT_HCA.mtt_base_addr, as in the other contexts */
+    pseudo_bit_t       reserved6[0x00010];
+    pseudo_bit_t       log2_page_size[0x00006];/* Log (base 2) of MTT page size in units of 4KByte */
+    pseudo_bit_t       reserved7[0x00002];
+/* -------------- */
+    pseudo_bit_t       reserved8[0x00003];
+    pseudo_bit_t       mtt_base_addr_l[0x0001d];/* MTT Base Address [31:3] in ICM */
+/* -------------- */
+    pseudo_bit_t       reserved9[0x00020];
+/* -------------- */
+    pseudo_bit_t       reserved10[0x00100];
+/* -------------- */
+}; 
+
+/* MAD_IFC Input Modifier */
+
+struct hermonprm_mad_ifc_input_modifier_st {   /* Little Endian */
+    /* Layout of the 32-bit input modifier word for the MAD_IFC command */
+    pseudo_bit_t       port_number[0x00008];  /* The packet reception port number (1 or 2). */
+    pseudo_bit_t       mad_extended_info[0x00001];/* Mad_Extended_Info valid bit (MAD_IFC Input Mailbox data from offset 00100h and down). MAD_Extended_Info is read only if this bit is set.
+                                                 Required for trap generation when BKey check is enabled and for global routed packets. */
+    pseudo_bit_t       reserved0[0x00007];
+    pseudo_bit_t       rlid[0x00010];         /* Remote (source) LID  from the received MAD.
+                                                 This field is required for trap generation upon MKey/BKey validation. */
+/* -------------- */
+}; 
+
+/* MAD_IFC Input Mailbox     ###michal -gdror fixed */
+
+/* MAD_IFC input mailbox: the 256-byte MAD packet followed by the
+   optional extended receive information (valid only when the
+   Mad_extended_info bit is set in the input modifier). */
+struct hermonprm_mad_ifc_st {  /* Little Endian */
+    pseudo_bit_t       request_mad_packet[64][0x00020];/* Request MAD Packet (256bytes) */
+/* -------------- */
+    pseudo_bit_t       my_qpn[0x00018];       /* Destination QP number from the received MAD. 
+                                                 This field is reserved if Mad_extended_info indication in the input modifier is clear. */
+    pseudo_bit_t       reserved0[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00020];
+/* -------------- */
+    pseudo_bit_t       rqpn[0x00018];         /* Remote (source) QP number  from the received MAD.
+                                                 This field is reserved if Mad_extended_info indication in the input modifier is clear. */
+    pseudo_bit_t       reserved2[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00010];
+    pseudo_bit_t       ml_path[0x00007];      /* My (destination) LID path bits  from the received MAD.
+                                                 This field is reserved if Mad_extended_info indication in the input modifier is clear. */
+    pseudo_bit_t       g[0x00001];            /* If set, the GRH field is valid. 
+                                                 This field is reserved if Mad_extended_info indication in the input modifier is clear. */
+    pseudo_bit_t       reserved4[0x00004];
+    pseudo_bit_t       sl[0x00004];           /* Service Level of the received MAD.
+                                                 This field is reserved if Mad_extended_info indication in the input modifier is clear. */
+/* -------------- */
+    pseudo_bit_t       pkey_indx[0x00010];    /* Index in PKey table that matches PKey of the received MAD. 
+                                                 This field is reserved if Mad_extended_info indication in the input modifier is clear. */
+    pseudo_bit_t       reserved5[0x00010];
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00160];
+/* -------------- */
+    pseudo_bit_t       grh[10][0x00020];      /* The GRH field of the MAD packet that was scattered to the first 40 bytes pointed to by the scatter list. 
+                                                 Valid if Mad_extended_info bit (in the input modifier) and g bit are set. 
+                                                 Otherwise this field is reserved. */
+/* -------------- */
+    pseudo_bit_t       reserved7[0x004c0];
+/* -------------- */
+}; 
+
+/* Query Debug Message     #### michal - gdror fixed */
+
+/* Output mailbox layout for the Query Debug Message command: firmware
+   trace-buffer addresses/sizes and debug message mask. */
+struct hermonprm_query_debug_msg_st {  /* Little Endian */
+    pseudo_bit_t       phy_addr_h[0x00020];   /* Translation of the address in firmware area. High 32 bits. */
+/* -------------- */
+    pseudo_bit_t       v[0x00001];            /* Physical translation is valid */
+    pseudo_bit_t       reserved0[0x0000b];
+    pseudo_bit_t       phy_addr_l[0x00014];   /* Translation of the address in firmware area. Low 32 bits. */
+/* -------------- */
+    pseudo_bit_t       fw_area_base[0x00020]; /* Firmware area base address. The format strings and the trace buffers may be located starting from this address. */
+/* -------------- */
+    pseudo_bit_t       fw_area_size[0x00020]; /* Firmware area size */
+/* -------------- */
+    pseudo_bit_t       trc_hdr_sz[0x00020];   /* Trace message header size in dwords. */
+/* -------------- */
+    pseudo_bit_t       trc_arg_num[0x00020];  /* The number of arguments per trace message. */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x000c0];
+/* -------------- */
+    pseudo_bit_t       dbg_msk_h[0x00020];    /* Debug messages mask [63:32] */
+/* -------------- */
+    pseudo_bit_t       dbg_msk_l[0x00020];    /* Debug messages mask [31:0] */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00040];
+/* -------------- */
+    pseudo_bit_t       buff0_addr[0x00020];   /* Address in firmware area of Trace Buffer 0 */
+/* -------------- */
+    pseudo_bit_t       buff0_size[0x00020];   /* Size of Trace Buffer 0 */
+/* -------------- */
+    pseudo_bit_t       buff1_addr[0x00020];   /* Address in firmware area of Trace Buffer 1 */
+/* -------------- */
+    pseudo_bit_t       buff1_size[0x00020];   /* Size of Trace Buffer 1 */
+/* -------------- */
+    pseudo_bit_t       buff2_addr[0x00020];   /* Address in firmware area of Trace Buffer 2 */
+/* -------------- */
+    pseudo_bit_t       buff2_size[0x00020];   /* Size of Trace Buffer 2 */
+/* -------------- */
+    pseudo_bit_t       buff3_addr[0x00020];   /* Address in firmware area of Trace Buffer 3 */
+/* -------------- */
+    pseudo_bit_t       buff3_size[0x00020];   /* Size of Trace Buffer 3 */
+/* -------------- */
+    pseudo_bit_t       buff4_addr[0x00020];   /* Address in firmware area of Trace Buffer 4 */
+/* -------------- */
+    pseudo_bit_t       buff4_size[0x00020];   /* Size of Trace Buffer 4 */
+/* -------------- */
+    pseudo_bit_t       buff5_addr[0x00020];   /* Address in firmware area of Trace Buffer 5 */
+/* -------------- */
+    pseudo_bit_t       buff5_size[0x00020];   /* Size of Trace Buffer 5 */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00080];
+/* -------------- */
+    pseudo_bit_t       hw_buff_addr[0x00020]; /* Dror Mux Bohrer tracer */
+/* -------------- */
+    pseudo_bit_t       hw_buff_size[0x00020]; /* Size of the hardware trace buffer at hw_buff_addr (units not specified here -- presumably bytes; confirm against PRM) */
+/* -------------- */
+    pseudo_bit_t       reserved4[0x003c0];
+/* -------------- */
+}; 
+
+/* User Access Region */
+
+/* Doorbell registers within a single User Access Region page.  The
+   embedded *_doorbell_st member layouts are defined earlier in this
+   header (C requires complete member types at this point). */
+struct hermonprm_uar_st {      /* Little Endian */
+    struct hermonprm_rd_send_doorbell_st       rd_send_doorbell;/* Reliable Datagram send doorbell */
+/* -------------- */
+    struct hermonprm_send_doorbell_st  send_doorbell;/* Send doorbell */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00040];
+/* -------------- */
+    struct hermonprm_cq_cmd_doorbell_st        cq_command_doorbell;/* CQ Doorbell */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x03ec0];    /* Padding to the end of the UAR page */
+/* -------------- */
+}; 
+
+/* Receive doorbell */
+
+/* Receive doorbell register: identifies the QP/SRQ being rung and the
+   running count of receive WQEs posted on it. */
+struct hermonprm_receive_doorbell_st { /* Little Endian */
+    pseudo_bit_t       reserved0[0x00008];
+    pseudo_bit_t       wqe_counter[0x00010];  /* Modulo-64K counter of WQEs posted on this queue since its creation. Should be zero for the first doorbell on the QP */
+    pseudo_bit_t       reserved1[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00005];
+    pseudo_bit_t       srq[0x00001];          /* If set, this is a Shared Receive Queue */
+    pseudo_bit_t       reserved3[0x00002];
+    pseudo_bit_t       qpn[0x00018];          /* QP number or SRQ number this doorbell is rung on */
+/* -------------- */
+}; 
+
+/* SET_IB Parameters */
+
+/* Input mailbox layout for the SET_IB command (per-port IB parameters:
+   capability mask, system image GUID, QKey violation counter reset). */
+struct hermonprm_set_ib_st {   /* Little Endian */
+    pseudo_bit_t       rqk[0x00001];          /* Reset QKey Violation Counter */
+    pseudo_bit_t       reserved0[0x00011];
+    pseudo_bit_t       sig[0x00001];          /* Set System Image GUID to system_image_guid specified.
+                                                 system_image_guid and sig must be the same for all ports. */
+    pseudo_bit_t       reserved1[0x0000d];
+/* -------------- */
+    pseudo_bit_t       capability_mask[0x00020];/* PortInfo Capability Mask */
+/* -------------- */
+    pseudo_bit_t       system_image_guid_h[0x00020];/* System Image GUID[63:32], takes effect only if the SIG bit is set
+                                                 Must be the same for both ports. */
+/* -------------- */
+    pseudo_bit_t       system_image_guid_l[0x00020];/* System Image GUID[31:0], takes effect only if the SIG bit is set
+                                                 Must be the same for both ports. */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00180];
+/* -------------- */
+}; 
+
+/* Multicast Group Member    #### michal - gdror fixed */
+
+/* One Multicast Group Member entry: the group GID, a hash-chain link
+   to the next entry, and up to eight member QPs. */
+struct hermonprm_mgm_entry_st {        /* Little Endian */
+    pseudo_bit_t       reserved0[0x00006];
+    pseudo_bit_t       next_gid_index[0x0001a];/* Index of next Multicast Group Member whose GID maps to same MGID_HASH number.
+                                                 The index is into the Multicast Group Table, which comprises the MGHT and AMGM tables.
+                                                 next_gid_index=0 means end of the chain. */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00060];
+/* -------------- */
+    pseudo_bit_t       mgid_128_96[0x00020];  /* Multicast group GID[128:96] in big endian format.
+                                                 Use the Reserved GID 0:0:0:0:0:0:0:0 for an invalid entry. */
+/* -------------- */
+    pseudo_bit_t       mgid_95_64[0x00020];   /* Multicast group GID[95:64] in big endian format.
+                                                 Use the Reserved GID 0:0:0:0:0:0:0:0 for an invalid entry. */
+/* -------------- */
+    pseudo_bit_t       mgid_63_32[0x00020];   /* Multicast group GID[63:32] in big endian format.
+                                                 Use the Reserved GID 0:0:0:0:0:0:0:0 for an invalid entry. */
+/* -------------- */
+    pseudo_bit_t       mgid_31_0[0x00020];    /* Multicast group GID[31:0] in big endian format.
+                                                 Use the Reserved GID 0:0:0:0:0:0:0:0 for an invalid entry. */
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp_0;   /* Multicast Group Member QP */
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp_1;   /* Multicast Group Member QP */
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp_2;   /* Multicast Group Member QP */
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp_3;   /* Multicast Group Member QP */
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp_4;   /* Multicast Group Member QP */
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp_5;   /* Multicast Group Member QP */
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp_6;   /* Multicast Group Member QP */
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp_7;   /* Multicast Group Member QP */
+/* -------------- */
+}; 
+
+/* INIT_PORT Parameters    #### michal - match PRM */
+
+/* Input mailbox layout for the INIT_PORT command (per-port limits and
+   optional GUID assignments, gated by the g0/ng/sig bits). */
+struct hermonprm_init_port_st {        /* Little Endian */
+    pseudo_bit_t       reserved0[0x00004];
+    pseudo_bit_t       vl_cap[0x00004];       /* Maximum VLs supported on the port, excluding VL15.
+                                                 Legal values are 1,2,4 and 8. */
+    pseudo_bit_t       port_width_cap[0x00004];/* IB Port Width
+                                                 1   - 1x
+                                                 3   - 1x, 4x
+                                                 11 - 1x, 4x or 12x (must not be used in InfiniHost-III-EX MT25208)
+                                                 else - Reserved */
+    pseudo_bit_t       reserved1[0x00004];
+    pseudo_bit_t       g0[0x00001];           /* Set port GUID0 to GUID0 specified */
+    pseudo_bit_t       ng[0x00001];           /* Set node GUID to node_guid specified.
+                                                 node_guid and ng must be the same for all ports. */
+    pseudo_bit_t       sig[0x00001];          /* Set System Image GUID to system_image_guid specified.
+                                                 system_image_guid and sig must be the same for all ports. */
+    pseudo_bit_t       reserved2[0x0000d];
+/* -------------- */
+    pseudo_bit_t       max_gid[0x00010];      /* Maximum number of GIDs for the port */
+    pseudo_bit_t       mtu[0x00010];          /* Maximum MTU Supported in bytes
+                                                 must be: 256, 512, 1024, 2048 or 4096
+                                                 For Eth port, can be any
+                                                 Field must not cross device capabilities as reported
+                                                  */
+/* -------------- */
+    pseudo_bit_t       max_pkey[0x00010];     /* Maximum pkeys for the port.
+                                                 Must be the same for both ports. */
+    pseudo_bit_t       reserved3[0x00010];
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00020];
+/* -------------- */
+    pseudo_bit_t       guid0_h[0x00020];      /* EUI-64 GUID assigned by the manufacturer, takes effect only if the G0 bit is set (bits 63:32) */
+/* -------------- */
+    pseudo_bit_t       guid0_l[0x00020];      /* EUI-64 GUID assigned by the manufacturer, takes effect only if the G0 bit is set (bits 31:0) */
+/* -------------- */
+    pseudo_bit_t       node_guid_h[0x00020];  /* Node GUID[63:32], takes effect only if the NG bit is set
+                                                 Must be the same for both ports. */
+/* -------------- */
+    pseudo_bit_t       node_guid_l[0x00020];  /* Node GUID[31:0], takes effect only if the NG bit is set
+                                                 Must be the same for both ports. */
+/* -------------- */
+    pseudo_bit_t       system_image_guid_h[0x00020];/* System Image GUID[63:32], takes effect only if the SIG bit is set
+                                                 Must be the same for both ports. */
+/* -------------- */
+    pseudo_bit_t       system_image_guid_l[0x00020];/* System Image GUID[31:0], takes effect only if the SIG bit is set
+                                                 Must be the same for both ports. */
+/* -------------- */
+    pseudo_bit_t       reserved5[0x006c0];
+/* -------------- */
+}; 
+
+/* Query Device Capabilities     #### michal - gdror fixed */
+
+/* Output mailbox layout for the QUERY_DEV_CAP command: device limits,
+   reserved-resource counts, supported transports/features, and
+   per-context ICM entry sizes. */
+struct hermonprm_query_dev_cap_st {    /* Little Endian */
+    pseudo_bit_t       reserved0[0x00080];
+/* -------------- */
+    pseudo_bit_t       log_max_qp[0x00005];   /* Log2 of the Maximum number of QPs supported */
+    pseudo_bit_t       reserved1[0x00003];
+    pseudo_bit_t       log2_rsvd_qps[0x00004];/* Log (base 2) of the number of QPs reserved for firmware use
+                                                 The reserved resources are numbered from 0 to 2^log2_rsvd_qps-1 */
+    pseudo_bit_t       reserved2[0x00004];
+    pseudo_bit_t       log_max_qp_sz[0x00008];/* The maximum number of WQEs allowed on the RQ or the SQ is 2^log_max_qp_sz-1 */
+    pseudo_bit_t       log_max_srq_sz[0x00008];/* The maximum number of WQEs allowed on the SRQ is 2^log_max_srq_sz-1 */
+/* -------------- */
+    pseudo_bit_t       log_max_scqs[0x00004]; /* log base 2 of number of supported schedule queues */
+    pseudo_bit_t       reserved3[0x00004];
+    pseudo_bit_t       num_rsvd_scqs[0x00006];/* Number of schedule queues reserved for firmware use -- presumed by analogy with num_rsvd_eqs; confirm against PRM */
+    pseudo_bit_t       reserved4[0x00002];
+    pseudo_bit_t       log_max_srqs[0x00005]; /* Log2 of the maximum number of SRQs supported (cf. log_max_qp) */
+    pseudo_bit_t       reserved5[0x00007];
+    pseudo_bit_t       log2_rsvd_srqs[0x00004];/* Log (base 2) of the number of SRQs reserved for firmware use (cf. log2_rsvd_qps) */
+/* -------------- */
+    pseudo_bit_t       log_max_cq[0x00005];   /* Log2 of the Maximum number of CQs supported */
+    pseudo_bit_t       reserved6[0x00003];
+    pseudo_bit_t       log2_rsvd_cqs[0x00004];/* Log (base 2) of the number of CQs reserved for firmware use
+                                                 The reserved resources are numbered from 0 to 2^log2_rsrvd_cqs-1 */
+    pseudo_bit_t       reserved7[0x00004];
+    pseudo_bit_t       log_max_cq_sz[0x00008];/* Log2 of the Maximum CQEs allowed in a CQ */
+    pseudo_bit_t       reserved8[0x00008];
+/* -------------- */
+    pseudo_bit_t       log_max_eq[0x00004];   /* Log2 of the Maximum number of EQs */
+    pseudo_bit_t       reserved9[0x00004];
+    pseudo_bit_t       num_rsvd_eqs[0x00004]; /* The number of EQs reserved for firmware use
+                                                 The reserved resources are numbered from 0 to num_rsvd_eqs-1
+                                                 If 0 - no resources are reserved. */
+    pseudo_bit_t       reserved10[0x00004];
+    pseudo_bit_t       log_max_d_mpts[0x00006];/* Log (base 2) of the maximum number of data MPT entries (the number of Regions/Windows) */
+    pseudo_bit_t       reserved11[0x00002];
+    pseudo_bit_t       log_max_eq_sz[0x00008];/* Log2 of the Maximum EQEs allowed in a EQ */
+/* -------------- */
+    pseudo_bit_t       log_max_mtts[0x00006]; /* Log2 of the Maximum number of MTT entries */
+    pseudo_bit_t       reserved12[0x00002];
+    pseudo_bit_t       log2_rsvd_mrws[0x00004];/* Log (base 2) of the number of MPTs reserved for firmware use
+                                                 The reserved resources are numbered from 0 to 2^log2_rsvd_mrws-1 */
+    pseudo_bit_t       reserved13[0x00004];
+    pseudo_bit_t       log_max_mrw_sz[0x00007];/* Log2 of the Maximum Size of Memory Region/Window. is it in PRM layout? */
+    pseudo_bit_t       reserved14[0x00005];
+    pseudo_bit_t       log2_rsvd_mtts[0x00004];/* Log (base 2) of the number of MTT entries reserved for firmware use
+                                                 The reserved resources are numbered from 0 to 2^log2_rsvd_mtts-1
+                                                  */
+/* -------------- */
+    pseudo_bit_t       reserved15[0x00020];
+/* -------------- */
+    pseudo_bit_t       log_max_ra_res_qp[0x00006];/* Log2 of the Maximum number of outstanding RDMA read/Atomic per QP as a responder */
+    pseudo_bit_t       reserved16[0x0000a];
+    pseudo_bit_t       log_max_ra_req_qp[0x00006];/* Log2 of the maximum number of outstanding RDMA read/Atomic per QP as a requester */
+    pseudo_bit_t       reserved17[0x0000a];
+/* -------------- */
+    pseudo_bit_t       log_max_ra_res_global[0x00006];/* Log2 of the maximum number of RDMA read/atomic operations the HCA responder can support globally. That implies the RDB table size. */
+    pseudo_bit_t       reserved18[0x0001a];
+/* -------------- */
+    pseudo_bit_t       rsz_srq[0x00001];      /* Ability to modify the maximum number of WRs per SRQ. */
+    pseudo_bit_t       reserved19[0x0001f];
+/* -------------- */
+    pseudo_bit_t       num_ports[0x00004];    /* Number of IB ports. */
+    pseudo_bit_t       max_vl_ib[0x00004];    /* Maximum VLs supported on each port, excluding VL15 */
+    pseudo_bit_t       ib_port_width[0x00004];/* IB Port Width
+                                                 1   - 1x
+                                                 3   - 1x, 4x
+                                                 11 - 1x, 4x or 12x
+                                                 else - Reserved */
+    pseudo_bit_t       ib_mtu[0x00004];       /* Maximum MTU Supported
+                                                 0x0 - Reserved
+                                                 0x1 - 256
+                                                 0x2 - 512
+                                                 0x3 - 1024
+                                                 0x4 - 2048
+                                                 0x5 - 4096
+                                                 0x6-0xF Reserved */
+    pseudo_bit_t       local_ca_ack_delay[0x00005];/* The Local CA ACK Delay. This is the value recommended to be returned in Query HCA verb.
+                                                 The delay value in microseconds is computed using 4.096us * 2^(local_ca_ack_delay). */
+    pseudo_bit_t       port_type[0x00004];    /* Hermon New. bit per port. bit0 is first port. value '1' is ethernet. '0' is IB */
+    pseudo_bit_t       reserved20[0x00004];
+    pseudo_bit_t       w[0x00001];            /* Hermon New. 10GB eth support */
+    pseudo_bit_t       j[0x00001];            /* Hermon New. Jumbo frame support */
+    pseudo_bit_t       reserved21[0x00001];
+/* -------------- */
+    pseudo_bit_t       log_max_gid[0x00004];  /* Log2 of the maximum number of GIDs per port */
+    pseudo_bit_t       reserved22[0x00004];
+    pseudo_bit_t       log_ethtype[0x00004];  /* Hermon New. log2 eth type table size */
+    pseudo_bit_t       reserved23[0x00004];
+    pseudo_bit_t       log_drain_size[0x00008];/* Log (base 2) of minimum size of the NoDropVLDrain buffer, specified in 4Kpages units */
+    pseudo_bit_t       log_max_msg[0x00005];  /* Log (base 2) of the maximum message size supported by the device */
+    pseudo_bit_t       reserved24[0x00003];
+/* -------------- */
+    pseudo_bit_t       log_max_pkey[0x00004]; /* Log2 of the max PKey Table Size (per IB port) */
+    pseudo_bit_t       reserved25[0x0000c];
+    pseudo_bit_t       stat_rate_support[0x00010];/* bit mask of stat rate supported
+                                                 bit 0 - full bw
+                                                 bit 1 - 1/4 bw
+                                                 bit 2 - 1/8 bw
+                                                 bit 3 - 1/2 bw; */
+/* -------------- */
+    pseudo_bit_t       reserved26[0x00020];
+/* -------------- */
+    pseudo_bit_t       rc[0x00001];           /* RC Transport supported */
+    pseudo_bit_t       uc[0x00001];           /* UC Transport Supported */
+    pseudo_bit_t       ud[0x00001];           /* UD Transport Supported */
+    pseudo_bit_t       src[0x00001];          /* SRC Transport Supported. Hermon New instead of RD. */
+    pseudo_bit_t       rcm[0x00001];          /* Reliable Multicast support. Hermon New instead of IPv6 Transport Supported */
+    pseudo_bit_t       fcoib[0x00001];        /* Hermon New */
+    pseudo_bit_t       srq[0x00001];          /* SRQ is supported
+                                                  */
+    pseudo_bit_t       checksum[0x00001];     /* IP over IB checksum is supported */
+    pseudo_bit_t       pkv[0x00001];          /* PKey Violation Counter Supported */
+    pseudo_bit_t       qkv[0x00001];          /* QKey Violation Counter Supported */
+    pseudo_bit_t       vmm[0x00001];          /* Hermon New */
+    pseudo_bit_t       reserved27[0x00005];
+    pseudo_bit_t       mw[0x00001];           /* Memory windows supported */
+    pseudo_bit_t       apm[0x00001];          /* Automatic Path Migration Supported */
+    pseudo_bit_t       atm[0x00001];          /* Atomic operations supported (atomicity is guaranteed between QPs on this HCA) */
+    pseudo_bit_t       rm[0x00001];           /* Raw Multicast Supported */
+    pseudo_bit_t       avp[0x00001];          /* Address Vector Port checking supported */
+    pseudo_bit_t       udm[0x00001];          /* UD Multicast Supported */
+    pseudo_bit_t       reserved28[0x00002];
+    pseudo_bit_t       pg[0x00001];           /* Paging on demand supported */
+    pseudo_bit_t       r[0x00001];            /* Router mode supported */
+    pseudo_bit_t       reserved29[0x00006];
+/* -------------- */
+    pseudo_bit_t       log_pg_sz[0x00008];    /* Minimum system page size supported (log2).
+                                                 For proper operation it must be less than or equal the hosting platform (CPU) minimum page size. */
+    pseudo_bit_t       reserved30[0x00008];
+    pseudo_bit_t       uar_sz[0x00006];       /* UAR Area Size = 1MB * 2^uar_sz */
+    pseudo_bit_t       reserved31[0x00006];
+    pseudo_bit_t       num_rsvd_uars[0x00004];/* The number of UARs reserved for firmware use
+                                                 The reserved resources are numbered from 0 to num_reserved_uars-1
+                                                 Note that UAR number num_reserved_uars is always for the kernel. */
+/* -------------- */
+    pseudo_bit_t       log_max_bf_pages[0x00006];/* Maximum number of BlueFlame pages is 2^log_max_bf_pages */
+    pseudo_bit_t       reserved32[0x00002];
+    pseudo_bit_t       log_max_bf_regs_per_page[0x00006];/* Maximum number of BlueFlame registers per page is 2^log_max_bf_regs_per_page. It may be that only the beginning of a page contains BlueFlame registers. */
+    pseudo_bit_t       reserved33[0x00002];
+    pseudo_bit_t       log_bf_reg_size[0x00005];/* BlueFlame register size in bytes is 2^log_bf_reg_size */
+    pseudo_bit_t       reserved34[0x0000a];
+    pseudo_bit_t       bf[0x00001];           /* If set to "1" then BlueFlame may be used. */
+/* -------------- */
+    pseudo_bit_t       max_desc_sz_sq[0x00010];/* Max descriptor size in bytes for the send queue */
+    pseudo_bit_t       max_sg_sq[0x00008];    /* The maximum S/G list elements in a SQ WQE (max_desc_sz/16 - 3) */
+    pseudo_bit_t       reserved35[0x00008];
+/* -------------- */
+    pseudo_bit_t       max_desc_sz_rq[0x00010];/* Max descriptor size in bytes for the receive queue */
+    pseudo_bit_t       max_sg_rq[0x00008];    /* The maximum S/G list elements in a RQ WQE (max_desc_sz/16 - 3) */
+    pseudo_bit_t       reserved36[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved37[0x00001];
+    pseudo_bit_t       fexch_base_mpt_31_25[0x00007];/* Hermon New. FC mpt base mpt number */
+    pseudo_bit_t       fcp_ud_base_23_8[0x00010];/* Hermon New. FC ud QP  base QPN */
+    pseudo_bit_t       fexch_base_qp_23_16[0x00008];/* Hermon New. FC Exchange QP base QPN */
+/* -------------- */
+    pseudo_bit_t       reserved38[0x00020];
+/* -------------- */
+    pseudo_bit_t       log_max_mcg[0x00008];  /* Log2 of the maximum number of multicast groups */
+    pseudo_bit_t       num_rsvd_mcgs[0x00004];/* The number of MGMs reserved for firmware use in the MGHT.
+                                                 The reserved resources are numbered from 0 to num_reserved_mcgs-1
+                                                 If 0 - no resources are reserved. */
+    pseudo_bit_t       reserved39[0x00004];
+    pseudo_bit_t       log_max_qp_mcg[0x00008];/* Log2 of the maximum number of QPs per multicast group */
+    pseudo_bit_t       reserved40[0x00008];
+/* -------------- */
+    pseudo_bit_t       log_max_srcds[0x00004];/* Log2 of the maximum number of SRC Domains */
+    pseudo_bit_t       reserved41[0x00008];
+    pseudo_bit_t       num_rsvd_scrds[0x00004];/* The number of SRCDs reserved for firmware use
+                                                 The reserved resources are numbered from 0 to num_reserved_rdds-1.
+                                                 If 0 - no resources are reserved. */
+    pseudo_bit_t       log_max_pd[0x00005];   /* Log2 of the maximum number of PDs */
+    pseudo_bit_t       reserved42[0x00007];
+    pseudo_bit_t       num_rsvd_pds[0x00004]; /* The number of PDs reserved for firmware use
+                                                 The reserved resources are numbered from 0 to num_reserved_pds-1
+                                                 If 0 - no resources are reserved. */
+/* -------------- */
+    pseudo_bit_t       reserved43[0x000c0];
+/* -------------- */
+    pseudo_bit_t       qpc_entry_sz[0x00010]; /* QPC Entry Size for the device
+                                                 For the InfiniHost-III-EX MT25208 entry size is 256 bytes */
+    pseudo_bit_t       rdmardc_entry_sz[0x00010];/* RdmaRdC Entry Size for the device
+                                                 For the InfiniHost-III-EX MT25208 entry size is 256 bytes */
+/* -------------- */
+    pseudo_bit_t       altc_entry_sz[0x00010];/* Extended QPC entry size for the device
+                                                 For the InfiniHost-III-EX MT25208 entry size is 32 bytes */
+    pseudo_bit_t       aux_entry_sz[0x00010]; /* Auxiliary context entry size */
+/* -------------- */
+    pseudo_bit_t       cqc_entry_sz[0x00010]; /* CQC entry size for the device
+                                                 For the InfiniHost-III-EX MT25208 entry size is 64 bytes */
+    pseudo_bit_t       eqc_entry_sz[0x00010]; /* EQ context entry size for the device
+                                                 For the InfiniHost-III-EX MT25208 entry size is 64 bytes */
+/* -------------- */
+    pseudo_bit_t       c_mpt_entry_sz[0x00010];/* cMPT entry size in Bytes for the device.
+                                                 For the InfiniHost-III-EX MT25208 entry size is 64 bytes */
+    pseudo_bit_t       srq_entry_sz[0x00010]; /* SRQ context entry size for the device
+                                                 For the InfiniHost-III-EX MT25208 entry size is 32 bytes */
+/* -------------- */
+    pseudo_bit_t       d_mpt_entry_sz[0x00010];/* dMPT entry size in Bytes for the device.
+                                                 For the InfiniHost-III-EX MT25208 entry size is 64 bytes */
+    pseudo_bit_t       mtt_entry_sz[0x00010]; /* MTT entry size in Bytes for the device.
+                                                 For the InfiniHost-III-EX MT25208 entry size is 8 bytes */
+/* -------------- */
+    pseudo_bit_t       bmme[0x00001];         /* Base Memory Management Extension Support */
+    pseudo_bit_t       win_type[0x00001];     /* Bound Type 2 Memory Window Association mechanism:
+                                                 0 - Type 2A - QP Number Association; or
+                                                 1 - Type 2B - QP Number and PD Association. */
+    pseudo_bit_t       mps[0x00001];          /* Ability of this HCA to support multiple page sizes per Memory Region. */
+    pseudo_bit_t       bl[0x00001];           /* Ability of this HCA to support Block List Physical Buffer Lists. */
+    pseudo_bit_t       zb[0x00001];           /* Zero Based region/windows supported */
+    pseudo_bit_t       lif[0x00001];          /* Ability of this HCA to support Local Invalidate Fencing. */
+    pseudo_bit_t       reserved44[0x0001a];
+/* -------------- */
+    pseudo_bit_t       resd_lkey[0x00020];    /* The value of the reserved Lkey for Base Memory Management Extension */
+/* -------------- */
+    pseudo_bit_t       reserved45[0x00020];
+/* -------------- */
+    pseudo_bit_t       max_icm_size_h[0x00020];/* Bits [63:32] of maximum ICM size InfiniHost III Ex support in bytes. */
+/* -------------- */
+    pseudo_bit_t       max_icm_size_l[0x00020];/* Bits [31:0] of maximum ICM size InfiniHost III Ex support in bytes. */
+/* -------------- */
+    pseudo_bit_t       reserved46[0x002c0];
+/* -------------- */
+}; 
+
+/* QUERY_ADAPTER Parameters Block    #### michal - gdror fixed */
+
+/* Output mailbox layout for the QUERY_ADAPTER command (interrupt pin
+   and vendor-specific data block). */
+struct hermonprm_query_adapter_st {    /* Little Endian */
+    pseudo_bit_t       reserved0[0x00080];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00018];
+    pseudo_bit_t       intapin[0x00008];      /* Driver should set this field to INTR value in the event queue in order to get Express interrupt messages. */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00060];
+/* -------------- */
+    struct hermonprm_vsd_st    vsd;         /* Vendor-specific data. ###michal- this field was replaced by 2 fields : vsd .1664; vsd(continued/psid .128; */
+/* -------------- */
+}; 
+
+/* QUERY_FW Parameters Block      #### michal - doesn't match PRM */
+
+struct hermonprm_query_fw_st { /* Little Endian */
+    pseudo_bit_t       fw_rev_major[0x00010]; /* Firmware Revision - Major */
+    pseudo_bit_t       fw_pages[0x00010];     /* Amount of physical memory to be allocated for FW usage, in 4KByte pages. */
+/* -------------- */
+    pseudo_bit_t       fw_rev_minor[0x00010]; /* Firmware Revision - Minor */
+    pseudo_bit_t       fw_rev_subminor[0x00010];/* Firmware Sub-minor version (Patch level). */
+/* -------------- */
+    pseudo_bit_t       cmd_interface_rev[0x00010];/* Command Interface Interpreter Revision ID */
+    pseudo_bit_t       reserved0[0x00010];
+/* -------------- */
+    pseudo_bit_t       log_max_outstanding_cmd[0x00008];/* Log2 of the maximum number of commands the HCR can support simultaneously */
+    pseudo_bit_t       reserved1[0x00017];
+    pseudo_bit_t       dt[0x00001];           /* Debug Trace Support
+                                                 0 - Debug trace is not supported 
+                                                 1 - Debug trace is supported */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00001];
+    pseudo_bit_t       ccq[0x00001];          /* CCQ support */
+    pseudo_bit_t       reserved3[0x00006];
+    pseudo_bit_t       fw_seconds[0x00008];   /* FW timestamp - seconds. Displayed as Hexadecimal number */
+    pseudo_bit_t       fw_minutes[0x00008];   /* FW timestamp - minutes. Displayed as Hexadecimal number */
+    pseudo_bit_t       fw_hour[0x00008];      /* FW timestamp - hour.    Displayed as Hexadecimal number */
+/* -------------- */
+    pseudo_bit_t       fw_day[0x00008];       /* FW timestamp - day.     Displayed as Hexadecimal number */
+    pseudo_bit_t       fw_month[0x00008];     /* FW timestamp - month.   Displayed as Hexadecimal number */
+    pseudo_bit_t       fw_year[0x00010];      /* FW timestamp - year.    Displayed as Hexadecimal number (e.g. 0x2005) */
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00040];
+/* -------------- */
+    pseudo_bit_t       clr_int_base_offset_h[0x00020];/* Bits [63:32] of the Clear Interrupt register's offset from clr_int_bar register in PCI address space. Points to a 64-bit register. */
+/* -------------- */
+    pseudo_bit_t       clr_int_base_offset_l[0x00020];/* Bits [31:0] of the Clear Interrupt register's offset from clr_int_bar register in PCI address space. Points to a 64-bit register. */
+/* -------------- */
+    pseudo_bit_t       reserved5[0x0001e];
+    pseudo_bit_t       clr_int_bar[0x00002];  /* PCI base address register (BAR) where clr_int register is located.
+                                                 00 - BAR 0-1
+                                                 01 - BAR 2-3
+                                                 10 - BAR 4-5
+                                                 11 - Reserved
+                                                 The PCI BARs of ConnectX are 64 bit BARs.
+                                                 In ConnectX, clr_int register is located on BAR 0-1. */
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00020];
+/* -------------- */
+    pseudo_bit_t       error_buf_offset_h[0x00020];/* Read Only buffer for catastrophic error reports (bits [63:32] of offset from error_buf_bar register in PCI address space.) */
+/* -------------- */
+    pseudo_bit_t       error_buf_offset_l[0x00020];/* Read Only buffer for catastrophic error reports (bits [31:0]  of offset from error_buf_bar register in PCI address space.) */
+/* -------------- */
+    pseudo_bit_t       error_buf_size[0x00020];/* Size in words */
+/* -------------- */
+    pseudo_bit_t       reserved7[0x0001e];
+    pseudo_bit_t       error_buf_bar[0x00002];/* PCI base address register (BAR) where error_buf register is located.
+                                                 00 - BAR 0-1
+                                                 01 - BAR 2-3
+                                                 10 - BAR 4-5
+                                                 11 - Reserved
+                                                 The PCI BARs of ConnectX are 64 bit BARs.
+                                                 In ConnectX, error_buf register is located on BAR 0-1. */
+/* -------------- */
+    pseudo_bit_t       reserved8[0x00600];
+/* -------------- */
+}; 
+
+/* Memory Access Parameters for UD Address Vector Table */
+
+struct hermonprm_udavtable_memory_parameters_st {      /* Little Endian */
+    pseudo_bit_t       l_key[0x00020];        /* L_Key used to access the TPT (Translation and Protection Table) */
+/* -------------- */
+    pseudo_bit_t       pd[0x00018];           /* PD used by TPT for matching against PD of region entry being accessed. */
+    pseudo_bit_t       reserved0[0x00005];
+    pseudo_bit_t       xlation_en[0x00001];   /* Translation enable: when cleared, address is physical address and no translation will be done. When set, address is virtual. */
+    pseudo_bit_t       reserved1[0x00002];
+/* -------------- */
+}; 
+
+/* INIT_HCA & QUERY_HCA Parameters Block ####michal-doesn't match PRM (see differs below) new size in bytes:0x300 */
+
+struct hermonprm_init_hca_st { /* Little Endian */
+    pseudo_bit_t       reserved0[0x00018];
+    pseudo_bit_t       version[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00040];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00010];
+    pseudo_bit_t       hca_core_clock[0x00010];/* Internal Clock freq in MHz */
+/* -------------- */
+    pseudo_bit_t       router_qp[0x00018];    /* QP number for router mode (8 LSBits should be 0). Low order 8 bits are taken from the TClass field of the incoming packet.
+                                                 Valid only if RE bit is set */
+    pseudo_bit_t       reserved3[0x00005];
+    pseudo_bit_t       ipr2[0x00001];         /* Hermon New. IP router on port 2 */
+    pseudo_bit_t       ipr1[0x00001];         /* Hermon New. IP router on port 1 */
+    pseudo_bit_t       ibr[0x00001];          /* InfiniBand Router Mode */
+/* -------------- */
+    pseudo_bit_t       udp[0x00001];          /* UD Port Check Enable
+                                                 0 - Port field in Address Vector is ignored
+                                                 1 - HCA will check the port field in AV entry (fetched for UD descriptor) against the Port of the UD QP executing the descriptor. */
+    pseudo_bit_t       he[0x00001];           /* Host Endianness - Used for Atomic Operations
+                                                 0 - Host is Little Endian
+                                                 1 - Host is Big Endian
+                                                  */
+    pseudo_bit_t       reserved4[0x00001];
+    pseudo_bit_t       ce[0x00001];           /* Checksum Enabled - when set, IP-over-IB checksum generation & checking is enabled */
+    pseudo_bit_t       reserved5[0x0001c];
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00040];
+/* -------------- */
+    struct hermonprm_qpcbaseaddr_st    qpc_eec_cqc_eqc_rdb_parameters;/* ## michal - this field has changed to - "qpc_cqc_eqc_parameters" - gdror, this is ok for now */
+/* -------------- */
+    pseudo_bit_t       reserved7[0x00100];
+/* -------------- */
+    struct hermonprm_multicastparam_st multicast_parameters;/* ##michal- this field has changed to - "IBUD/IPv6_multicast_parameters" - gdror - this is OK for now */
+/* -------------- */
+    pseudo_bit_t       reserved8[0x00080];
+/* -------------- */
+    struct hermonprm_tptparams_st      tpt_parameters;
+/* -------------- */
+    pseudo_bit_t       reserved9[0x00080];
+/* -------------- */
+    struct hermonprm_uar_params_st     uar_parameters;/* UAR Parameters */
+/* -------------- */
+    pseudo_bit_t       reserved10[0x00600];
+/* -------------- */
+}; 
+
+/* Event Queue Context Table Entry     #### michal - gdror fixed */
+
+struct hermonprm_eqc_st {      /* Little Endian */
+    pseudo_bit_t       reserved0[0x00008];
+    pseudo_bit_t       st[0x00004];           /* Event delivery state machine
+                                                 0x9 - Armed
+                                                 0xA - Fired
+                                                 0xB - Always_Armed (auto-rearm)
+                                                 other - reserved */
+    pseudo_bit_t       reserved1[0x00005];
+    pseudo_bit_t       oi[0x00001];           /* Overrun ignore.
+                                                 If set, HW will not check EQ full condition when writing new EQEs. */
+    pseudo_bit_t       ec[0x00001];           /* If set, all EQEs are written (coalesced) to first EQ entry */
+    pseudo_bit_t       reserved2[0x00009];
+    pseudo_bit_t       status[0x00004];       /* EQ status:
+                                                 0000 - OK
+                                                 1010 - EQ write failure
+                                                 Valid for the QUERY_EQ and HW2SW_EQ commands only */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00020];
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00005];
+    pseudo_bit_t       page_offset[0x00007];  /* offset bits[11:5] of first EQE in the EQ relative to the first page in memory region mapping this EQ */
+    pseudo_bit_t       reserved5[0x00014];
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00018];
+    pseudo_bit_t       log_eq_size[0x00005];  /* Log (base 2) of the EQ size (in entries).  Maximum EQ size is 2^22 EQEs (max log_eq_size is 22) */
+    pseudo_bit_t       reserved7[0x00003];
+/* -------------- */
+    pseudo_bit_t       eq_max_count[0x00010]; /* Event Generation Moderation counter */
+    pseudo_bit_t       eq_period[0x00010];    /* Event Generation moderation timer, microseconds */
+/* -------------- */
+    pseudo_bit_t       intr[0x0000a];         /* MSI-X table entry index to be used to signal interrupts on this EQ.  Reserved if MSI-X are not enabled in the PCI configuration header. */
+    pseudo_bit_t       reserved8[0x00016];
+/* -------------- */
+    pseudo_bit_t       mtt_base_addr_h[0x00008];/* MTT Base Address [39:32] relative to INIT_HCA.mtt_base_addr */
+    pseudo_bit_t       reserved9[0x00010];
+    pseudo_bit_t       log2_page_size[0x00006];/* Log (base 2) of MTT page size in units of 4KByte */
+    pseudo_bit_t       reserved10[0x00002];
+/* -------------- */
+    pseudo_bit_t       reserved11[0x00003];
+    pseudo_bit_t       mtt_base_addr_l[0x0001d];/* MTT Base Address [31:3] relative to INIT_HCA.mtt_base_addr */
+/* -------------- */
+    pseudo_bit_t       reserved12[0x00040];
+/* -------------- */
+    pseudo_bit_t       consumer_counter[0x00018];/* Consumer counter. The counter is incremented for each EQE polled from the EQ. 
+                                                  Must be 0x0 in EQ initialization. 
+                                                  Maintained by HW (valid for the QUERY_EQ command only). */
+    pseudo_bit_t       reserved13[0x00008];
+/* -------------- */
+    pseudo_bit_t       producer_counter[0x00018];/* Producer Counter. The counter is incremented for each EQE that is written by the HW to the EQ. 
+                                                  EQ overrun is reported if Producer_counter + 1 equals to Consumer_counter and an EQE needs to be added.
+                                                  Maintained by HW (valid for the QUERY_EQ command only) */
+    pseudo_bit_t       reserved14[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved15[0x00080];
+/* -------------- */
+}; 
+
+/* Memory Translation Table (MTT) Entry     #### michal - match to PRM */
+
+struct hermonprm_mtt_st {      /* Little Endian */
+    pseudo_bit_t       ptag_h[0x00020];       /* High-order bits of physical tag. The size of the field depends on the page size of the region. Maximum PTAG size is 52 bits. */
+/* -------------- */
+    pseudo_bit_t       p[0x00001];            /* Present bit. If set, page entry is valid. If cleared, access to this page will generate a non-present page access fault. */
+    pseudo_bit_t       reserved0[0x00002];    /* reserved */
+    pseudo_bit_t       ptag_l[0x0001d];       /* Low-order bits of Physical tag. The size of the field depends on the page size of the region. Maximum PTAG size is 52 bits. */
+/* -------------- */
+}; 
+
+/* Memory Protection Table (MPT) Entry   ### doesn't match PRM (new fields were added). new size in bytes : 0x54 */
+
+struct hermonprm_mpt_st {      /* Little Endian */
+    pseudo_bit_t       reserved0[0x00008];
+    pseudo_bit_t       r_w[0x00001];          /* Defines whether this entry is Region (1) or Window (0) */
+    pseudo_bit_t       pa[0x00001];           /* Physical address. If set, no virtual-to-physical address translation is performed for this region */
+    pseudo_bit_t       lr[0x00001];           /* If set - local read access is enabled. Must be set for all MPT Entries. */
+    pseudo_bit_t       lw[0x00001];           /* If set - local write access is enabled */
+    pseudo_bit_t       rr[0x00001];           /* If set - remote read access is enabled. */
+    pseudo_bit_t       rw[0x00001];           /* If set - remote write access is enabled */
+    pseudo_bit_t       atomic[0x00001];       /* If set - remote Atomic access is allowed. */
+    pseudo_bit_t       eb[0x00001];           /* If set - bind is enabled. Valid only for regions. */
+    pseudo_bit_t       atc_req[0x00001];      /* If set, second hop of address translation (PA to MA) to be performed in the device prior to issuing the uplink request. */
+    pseudo_bit_t       atc_xlated[0x00001];   /* If set, uplink cycle to be issued with "ATC_translated" indicator to force bypass of the chipset IOMMU. */
+    pseudo_bit_t       reserved1[0x00001];
+    pseudo_bit_t       no_snoop[0x00001];     /* If set, issue PCIe cycle with "no Snoop" attribute - cycle not to be snooped in CPU caches */
+    pseudo_bit_t       reserved2[0x00008];
+    pseudo_bit_t       status[0x00004];       /* 0xF - Not Valid 0x3 - Free. else - HW ownership.Unbound Type1 windows are denoted by reg_wnd_len=0. Unbound Type II windows are denoted by Status = Free. */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00007];
+    pseudo_bit_t       bqp[0x00001];          /* 0 - not bound to qp (type 1 window, MR); 1 - bound to qp (type 2 window) */
+    pseudo_bit_t       qpn[0x00018];          /* QP number this MW is attached to. Valid for type2 memory windows and on QUERY_MPT only */
+/* -------------- */
+    pseudo_bit_t       mem_key[0x00020];      /* The memory Key. The field holds the mem_key field in the following semantics: {key[7:0],key[31:8]}. */
+/* -------------- */
+    pseudo_bit_t       pd[0x00018];           /* Protection Domain. If VMM support is enabled PD[17:23] specify Guest VM Identifier */
+    pseudo_bit_t       en_rinv[0x00001];      /* Enable remote invalidation */
+    pseudo_bit_t       ei[0x00001];           /* Enable Invalidation - When set, Local/Remote invalidation can be executed on this window/region. Must be set for type2 windows and non-shared physical memory regions. Must be clear for regions that are used to access Work Queues, Completion Queues and Event Queues */
+    pseudo_bit_t       nce[0x00001];          /* Data can be cached in Network Cache (see "Network Cache" on page 81) */
+    pseudo_bit_t       fre[0x00001];          /* When set, Fast Registration Operations can be executed on this region */
+    pseudo_bit_t       rae[0x00001];          /* When set, remote access can be enabled on this region. Used when executing Fast Registration Work Request to validate that remote access rights can be granted to this MPT. If the bit is cleared, Fast Registration Work Request requesting remote access rights will fail */
+    pseudo_bit_t       w_dif[0x00001];        /* Wire space contains dif */
+    pseudo_bit_t       m_dif[0x00001];        /* Memory space contains dif */
+    pseudo_bit_t       reserved4[0x00001];
+/* -------------- */
+    pseudo_bit_t       start_addr_h[0x00020]; /* Start Address - Virtual Address where this region/window starts */
+/* -------------- */
+    pseudo_bit_t       start_addr_l[0x00020]; /* Start Address - Virtual Address where this region/window starts */
+/* -------------- */
+    pseudo_bit_t       len_h[0x00020];        /* Region/Window Length */
+/* -------------- */
+    pseudo_bit_t       len_l[0x00020];        /* Region/Window Length */
+/* -------------- */
+    pseudo_bit_t       lkey[0x00020];         /* Must be 0 for SW2HW_MPT. On QUERY_MPT and HW2SW_MPT commands for Memory Window it reflects the LKey of the Region that the Window is bound to.The field holds the lkey field in the following semantics: {key[7:0],key[31:8]}. */
+/* -------------- */
+    pseudo_bit_t       win_cnt[0x00018];      /* Number of windows bound to this region. Valid for regions only.The field is valid only for the QUERY_MPT and HW2SW_MPT commands. */
+    pseudo_bit_t       reserved5[0x00008];
+/* -------------- */
+    pseudo_bit_t       mtt_rep[0x00004];      /* Log (base 2) of the number of time an MTT is replicated.E.g. for 64KB virtual blocks from 512B blocks, a replication factor of 2^7 is needed (MTT_REPLICATION_FACTOR=7).Up to 1MB of replicated block works */
+    pseudo_bit_t       reserved6[0x00011];
+    pseudo_bit_t       block_mode[0x00001];   /* If set, the page size is not power of two, and entity_size is in bytes. */
+    pseudo_bit_t       len64[0x00001];        /* Region/Window Length[64]. This bit added to enable registering 2^64 bytes per region */
+    pseudo_bit_t       fbo_en[0x00001];       /* If set, mtt_fbo field is valid, otherwise it is calculated from least significant bytes of the address. Must be set when mtt_rep is used or MPT is block-mode region */
+    pseudo_bit_t       reserved7[0x00008];
+/* -------------- */
+    pseudo_bit_t       mtt_adr_h[0x00008];    /* Offset to MTT list for this region. Must be aligned on 8 bytes. */
+    pseudo_bit_t       reserved8[0x00018];
+/* -------------- */
+    pseudo_bit_t       mtt_adr_l[0x00020];    /* Offset to MTT list for this region. Must be aligned on 8 bytes.###michal-replaced with: RESERVED .3;mtt_adr_l .29; gdror - this is OK to leave it this way. */
+/* -------------- */
+    pseudo_bit_t       mtt_size[0x00020];     /* Number of MTT entries allocated for this MR.When Fast Registration Operations cannot be executed on this region (FRE bit is zero) this field is reserved.When Fast Registration Operation is enabled (FRE bit is set) this field indicates the number of MTTs allocated for this MR. The mtt_sz value cannot be zero. */
+/* -------------- */
+    pseudo_bit_t       entity_size[0x00015];  /* Page/block size. If MPT maps pages, the page size is 2^entity_size. If MPT maps blocks, the entity_size field specifies block size in bytes. The minimum amount of memory that can be mapped with single MTT is 512 bytes. */
+    pseudo_bit_t       reserved9[0x0000b];
+/* -------------- */
+    pseudo_bit_t       mtt_fbo[0x00015];      /* First byte offset in the zero-based region - the first byte within the first block/page start address refers to. When mtt_rep is being used, fbo points within the replicated block (i.e. block-size x 2^mtt_rep) */
+    pseudo_bit_t       reserved10[0x0000b];
+/* -------------- */
+}; 
+
+/* Completion Queue Context Table Entry        #### michal - match PRM */
+
+struct hermonprm_completion_queue_context_st { /* Little Endian */
+    pseudo_bit_t       reserved0[0x00008];
+    pseudo_bit_t       st[0x00004];           /* Event delivery state machine
+                                                 0x0 - reserved
+                                                 0x9 - ARMED (Request for Notification)
+                                                 0x6 - ARMED SOLICITED (Request Solicited Notification)
+                                                 0xA - FIRED
+                                                 other - reserved
+                                                 
+                                                 Must be 0x0 in CQ initialization.
+                                                 Valid for the QUERY_CQ and HW2SW_CQ commands only. */
+    pseudo_bit_t       reserved1[0x00005];
+    pseudo_bit_t       oi[0x00001];           /* When set, overrun ignore is enabled.
+                                                 When set, Updates of CQ consumer counter (poll for completion) or Request completion notifications (Arm CQ) doorbells should not be rang on that CQ. */
+    pseudo_bit_t       cc[0x00001];           /* If set, all CQEs are written (coalesced) to first CQ entry */
+    pseudo_bit_t       reserved2[0x00009];
+    pseudo_bit_t       status[0x00004];       /* CQ status
+                                                 0000 - OK
+                                                 1001 - CQ overflow
+                                                 1010 - CQ write failure
+                                                 Valid for the QUERY_CQ and HW2SW_CQ commands only */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00020];
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00005];
+    pseudo_bit_t       page_offset[0x00007];  /* offset of first CQE in the CQ relative to the first page in memory region mapping this CQ */
+    pseudo_bit_t       reserved5[0x00014];
+/* -------------- */
+    pseudo_bit_t       usr_page[0x00018];     /* UAR page this CQ can be accessed through (ringing CQ doorbells) */
+    pseudo_bit_t       log_cq_size[0x00005];  /* Log (base 2) of the CQ size (in entries).
+                                                 Maximum CQ size is 2^17 CQEs (max log_cq_size is 17) */
+    pseudo_bit_t       reserved6[0x00003];
+/* -------------- */
+    pseudo_bit_t       cq_max_count[0x00010]; /* Event Generation Moderation counter */
+    pseudo_bit_t       cq_period[0x00010];    /* Event Generation moderation timer, microseconds */
+/* -------------- */
+    pseudo_bit_t       c_eqn[0x00009];        /* Event Queue this CQ reports completion events to.
+                                                 Valid values are 0 to 63
+                                                 If configured to value other than 0-63, completion events will not be reported on the CQ. */
+    pseudo_bit_t       reserved7[0x00017];
+/* -------------- */
+    pseudo_bit_t       mtt_base_addr_h[0x00008];/* MTT Base Address [39:32] in ICM relative to INIT_HCA.mtt_base_addr */
+    pseudo_bit_t       reserved8[0x00010];
+    pseudo_bit_t       log2_page_size[0x00006];/* Log (base 2) of MTT page size in units of 4KByte */
+    pseudo_bit_t       reserved9[0x00002];
+/* -------------- */
+    pseudo_bit_t       reserved10[0x00003];
+    pseudo_bit_t       mtt_base_addr_l[0x0001d];/* MTT Base Address [31:3] in ICM relative to INIT_HCA.mtt_base_addr */
+/* -------------- */
+    pseudo_bit_t       last_notified_indx[0x00018];/* Maintained by HW.
+                                                 Valid for QUERY_CQ and HW2SW_CQ commands only. */
+    pseudo_bit_t       reserved11[0x00008];
+/* -------------- */
+    pseudo_bit_t       solicit_producer_indx[0x00018];/* Maintained by HW.
+                                                 Valid for QUERY_CQ and HW2SW_CQ commands only. 
+                                                  */
+    pseudo_bit_t       reserved12[0x00008];
+/* -------------- */
+    pseudo_bit_t       consumer_counter[0x00018];/* Consumer counter is a 32bits counter that is incremented for each CQE polled from the CQ.
+                                                  */
+    pseudo_bit_t       reserved13[0x00008];
+/* -------------- */
+    pseudo_bit_t       producer_counter[0x00018];/* Producer counter is a 32bits counter that is incremented for each CQE that is written by the HW to the CQ.
+                                                 CQ overrun is reported if Producer_counter + 1 equals to Consumer_counter and a CQE needs to be added.
+                                                 Maintained by HW (valid for the QUERY_CQ and HW2SW_CQ commands only) */
+    pseudo_bit_t       reserved14[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved15[0x00020];
+/* -------------- */
+    pseudo_bit_t       reserved16[0x00020];
+/* -------------- */
+    pseudo_bit_t       db_record_addr_h[0x00020];/* CQ DB Record physical address [63:32] */
+/* -------------- */
+    pseudo_bit_t       reserved17[0x00003];
+    pseudo_bit_t       db_record_addr_l[0x0001d];/* CQ DB Record physical address [31:3] */
+/* -------------- */
+}; 
+
+/* GPIO_event_data   #### michal - gdror fixed */
+
+struct hermonprm_gpio_event_data_st {  /* Little Endian */
+    pseudo_bit_t       reserved0[0x00060];
+/* -------------- */
+    pseudo_bit_t       gpio_event_hi[0x00020];/* If any bit is set to 1, then a rising/falling event has occurred on the corresponding GPIO pin. */
+/* -------------- */
+    pseudo_bit_t       gpio_event_lo[0x00020];/* If any bit is set to 1, then a rising/falling event has occurred on the corresponding GPIO pin. */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00020];
+/* -------------- */
+}; 
+
+/* Event_data Field - QP/EE Events     #### michal - doesn't match PRM */
+
+struct hermonprm_qp_ee_event_st {      /* Little Endian */
+    pseudo_bit_t       qpn_een[0x00018];      /* QP/EE/SRQ number event is reported for  ###michal - field changed to QP number */
+    pseudo_bit_t       reserved0[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00020];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x0001c];
+    pseudo_bit_t       e_q[0x00001];          /* If set - EEN; if cleared - QP in the QPN/EEN field
+                                                 Not valid on SRQ events  ###michal - field replaced with RESERVED */
+    pseudo_bit_t       reserved3[0x00003];
+/* -------------- */
+    pseudo_bit_t       reserved4[0x00060];
+/* -------------- */
+}; 
+
+/* InfiniHost-III-EX Type0 Configuration Header   ####michal - doesn't match PRM (new fields added, see below) */
+
+struct hermonprm_mt25208_type0_st {    /* Little Endian */
+    pseudo_bit_t       vendor_id[0x00010];    /* Hardwired to 0x15B3 */
+    pseudo_bit_t       device_id[0x00010];    /* 25208 (decimal) - InfiniHost-III compatible mode
+                                                 25408 (decimal) - InfiniHost-III EX mode (the mode described in this manual)
+                                                 25209 (decimal) - Flash burner mode - see Flash burning application note for further details on this mode
+                                                  */
+/* -------------- */
+    pseudo_bit_t       command[0x00010];      /* PCI Command Register */
+    pseudo_bit_t       status[0x00010];       /* PCI Status Register */
+/* -------------- */
+    pseudo_bit_t       revision_id[0x00008];
+    pseudo_bit_t       class_code_hca_class_code[0x00018];
+/* -------------- */
+    pseudo_bit_t       cache_line_size[0x00008];/* Cache Line Size */
+    pseudo_bit_t       latency_timer[0x00008];
+    pseudo_bit_t       header_type[0x00008];  /* hardwired to zero */
+    pseudo_bit_t       bist[0x00008];
+/* -------------- */
+    pseudo_bit_t       bar0_ctrl[0x00004];    /* hard-wired to 0100 */
+    pseudo_bit_t       reserved0[0x00010];
+    pseudo_bit_t       bar0_l[0x0000c];       /* Lower bits of BAR0 (Device Configuration Space) */
+/* -------------- */
+    pseudo_bit_t       bar0_h[0x00020];       /* Upper 32 bits of BAR0 (Device Configuration Space) */
+/* -------------- */
+    pseudo_bit_t       bar1_ctrl[0x00004];    /* Hardwired to 1100 */
+    pseudo_bit_t       reserved1[0x00010];
+    pseudo_bit_t       bar1_l[0x0000c];       /* Lower bits of BAR1 (User Access Region - UAR - space) */
+/* -------------- */
+    pseudo_bit_t       bar1_h[0x00020];       /* upper 32 bits of BAR1 (User Access Region - UAR - space) */
+/* -------------- */
+    pseudo_bit_t       bar2_ctrl[0x00004];    /* Hardwired to 1100 */
+    pseudo_bit_t       reserved2[0x00010];
+    pseudo_bit_t       bar2_l[0x0000c];       /* Lower bits of BAR2 - Local Attached Memory if present and enabled. Else zeroed. */
+/* -------------- */
+    pseudo_bit_t       bar2_h[0x00020];       /* Upper 32 bits of BAR2 - Local Attached Memory if present and enabled. Else zeroed. */
+/* -------------- */
+    pseudo_bit_t       cardbus_cis_pointer[0x00020];
+/* -------------- */
+    pseudo_bit_t       subsystem_vendor_id[0x00010];/* Specified by the device NVMEM configuration */
+    pseudo_bit_t       subsystem_id[0x00010]; /* Specified by the device NVMEM configuration */
+/* -------------- */
+    pseudo_bit_t       expansion_rom_enable[0x00001];/* Expansion ROM Enable. Hardwired to 0 if expansion ROM is disabled in the device NVMEM configuration. */
+    pseudo_bit_t       reserved3[0x0000a];
+    pseudo_bit_t       expansion_rom_base_address[0x00015];/* Expansion ROM Base Address (upper 21 bit). Hardwired to 0 if expansion ROM is disabled in the device NVMEM configuration. */
+/* -------------- */
+    pseudo_bit_t       capabilities_pointer[0x00008];/* Specified by the device NVMEM configuration */
+    pseudo_bit_t       reserved4[0x00018];
+/* -------------- */
+    pseudo_bit_t       reserved5[0x00020];
+/* -------------- */
+    pseudo_bit_t       interrupt_line[0x00008];
+    pseudo_bit_t       interrupt_pin[0x00008];
+    pseudo_bit_t       min_gnt[0x00008];
+    pseudo_bit_t       max_latency[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved6[0x00100];
+/* -------------- */
+    pseudo_bit_t       msi_cap_id[0x00008];
+    pseudo_bit_t       msi_next_cap_ptr[0x00008];
+    pseudo_bit_t       msi_en[0x00001];
+    pseudo_bit_t       multiple_msg_cap[0x00003];
+    pseudo_bit_t       multiple_msg_en[0x00003];
+    pseudo_bit_t       cap_64_bit_addr[0x00001];
+    pseudo_bit_t       reserved7[0x00008];
+/* -------------- */
+    pseudo_bit_t       msg_addr_l[0x00020];
+/* -------------- */
+    pseudo_bit_t       msg_addr_h[0x00020];
+/* -------------- */
+    pseudo_bit_t       msg_data[0x00010];
+    pseudo_bit_t       reserved8[0x00010];
+/* -------------- */
+    pseudo_bit_t       reserved9[0x00080];
+/* -------------- */
+    pseudo_bit_t       pm_cap_id[0x00008];    /* Power management capability ID - 01h */
+    pseudo_bit_t       pm_next_cap_ptr[0x00008];
+    pseudo_bit_t       pm_cap[0x00010];       /* [2:0] Version - 02h
+                                                 [3] PME clock - 0h
+                                                 [4] RsvP
+                                                 [5] Device specific initialization - 0h
+                                                 [8:6] AUX current - 0h
+                                                 [9] D1 support - 0h
+                                                 [10] D2 support - 0h
+                                                 [15:11] PME support - 0h */
+/* -------------- */
+    pseudo_bit_t       pm_status_control[0x00010];/* [14:13] - Data scale - 0h */
+    pseudo_bit_t       pm_control_status_brdg_ext[0x00008];
+    pseudo_bit_t       data[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved10[0x00040];
+/* -------------- */
+    pseudo_bit_t       vpd_cap_id[0x00008];   /* 03h */
+    pseudo_bit_t       vpd_next_cap_id[0x00008];
+    pseudo_bit_t       vpd_address[0x0000f];
+    pseudo_bit_t       f[0x00001];
+/* -------------- */
+    pseudo_bit_t       vpd_data[0x00020];
+/* -------------- */
+    pseudo_bit_t       reserved11[0x00040];
+/* -------------- */
+    pseudo_bit_t       pciex_cap_id[0x00008]; /* PCI-Express capability ID - 10h */
+    pseudo_bit_t       pciex_next_cap_ptr[0x00008];
+    pseudo_bit_t       pciex_cap[0x00010];    /* [3:0] Capability version - 1h
+                                                 [7:4] Device/Port Type - 0h
+                                                 [8] Slot implemented - 0h
+                                                 [13:9] Interrupt message number
+                                                  */
+/* -------------- */
+    pseudo_bit_t       device_cap[0x00020];   /* [2:0] Max_Payload_Size supported - 2h
+                                                 [4:3] Phantom Function supported - 0h
+                                                 [5] Extended Tag Filed supported - 0h
+                                                 [8:6] Endpoint L0s Acceptable Latency - TBD
+                                                 [11:9] Endpoint L1 Acceptable Latency - TBD
+                                                 [12] Attention Button Present - configured through InfiniBurn
+                                                 [13] Attention Indicator Present - configured through InfiniBurn
+                                                 [14] Power Indicator Present - configured through InfiniBurn
+                                                 [25:18] Captured Slot Power Limit Value
+                                                 [27:26] Captured Slot Power Limit Scale */
+/* -------------- */
+    pseudo_bit_t       device_control[0x00010];
+    pseudo_bit_t       device_status[0x00010];
+/* -------------- */
+    pseudo_bit_t       link_cap[0x00020];     /* [3:0] Maximum Link Speed - 1h
+                                                 [9:4] Maximum Link Width - 8h
+                                                 [11:10] Active State Power Management Support - 3h
+                                                 [14:12] L0s Exit Latency - TBD
+                                                 [17:15] L1 Exit Latency - TBD
+                                                 [31:24] Port Number - 0h */
+/* -------------- */
+    pseudo_bit_t       link_control[0x00010];
+    pseudo_bit_t       link_status[0x00010];  /* [3:0] Link Speed - 1h
+                                                 [9:4] Negotiated Link Width
+                                                 [12] Slot clock configuration - 1h */
+/* -------------- */
+    pseudo_bit_t       reserved12[0x00260];
+/* -------------- */
+    pseudo_bit_t       advanced_error_reporting_cap_id[0x00010];/* 0001h. */
+    pseudo_bit_t       capability_version[0x00004];/* 1h */
+    pseudo_bit_t       next_capability_offset[0x0000c];/* 0h */
+/* -------------- */
+    pseudo_bit_t       uncorrectable_error_status_register[0x00020];/* 0 Training Error Status
+                                                 4 Data Link Protocol Error Status
+                                                 12 Poisoned TLP Status 
+                                                 13 Flow Control Protocol Error Status 
+                                                 14 Completion Timeout Status 
+                                                 15 Completer Abort Status 
+                                                 16 Unexpected Completion Status 
+                                                 17 Receiver Overflow Status 
+                                                 18 Malformed TLP Status 
+                                                 19 ECRC Error Status 
+                                                 20 Unsupported Request Error Status */
+/* -------------- */
+    pseudo_bit_t       uncorrectable_error_mask_register[0x00020];/* 0 Training Error Mask
+                                                 4 Data Link Protocol Error Mask
+                                                 12 Poisoned TLP Mask 
+                                                 13 Flow Control Protocol Error Mask
+                                                 14 Completion Timeout Mask
+                                                 15 Completer Abort Mask
+                                                 16 Unexpected Completion Mask
+                                                 17 Receiver Overflow Mask
+                                                 18 Malformed TLP Mask
+                                                 19 ECRC Error Mask
+                                                 20 Unsupported Request Error Mask */
+/* -------------- */
+    pseudo_bit_t       uncorrectable_severity_mask_register[0x00020];/* 0 Training Error Severity
+                                                 4 Data Link Protocol Error Severity
+                                                 12 Poisoned TLP Severity
+                                                 13 Flow Control Protocol Error Severity
+                                                 14 Completion Timeout Severity
+                                                 15 Completer Abort Severity
+                                                 16 Unexpected Completion Severity
+                                                 17 Receiver Overflow Severity
+                                                 18 Malformed TLP Severity
+                                                 19 ECRC Error Severity
+                                                 20 Unsupported Request Error Severity */
+/* -------------- */
+    pseudo_bit_t       correctable_error_status_register[0x00020];/* 0 Receiver Error Status
+                                                 6 Bad TLP Status
+                                                 7 Bad DLLP Status
+                                                 8 REPLAY_NUM Rollover Status
+                                                 12 Replay Timer Timeout Status */
+/* -------------- */
+    pseudo_bit_t       correctable_error_mask_register[0x00020];/* 0 Receiver Error Mask
+                                                 6 Bad TLP Mask
+                                                 7 Bad DLLP Mask
+                                                 8 REPLAY_NUM Rollover Mask
+                                                 12 Replay Timer Timeout Mask */
+/* -------------- */
+    pseudo_bit_t       advance_error_capabilities_and_control_register[0x00020];
+/* -------------- */
+    struct hermonprm_header_log_register_st    header_log_register;
+/* -------------- */
+    pseudo_bit_t       reserved13[0x006a0];
+/* -------------- */
+}; 
+
+/* Event Data Field - Performance Monitor */
+
+struct hermonprm_performance_monitor_event_st {        /* Little Endian */
+    struct hermonprm_performance_monitors_st   performance_monitor_snapshot;/* Performance monitor snapshot */
+/* -------------- */
+    pseudo_bit_t       monitor_number[0x00008];/* Monitored resource: 0x01 - SQPC
+                                                 0x02 - RQPC
+                                                 0x03 - CQC
+                                                 0x04 - Rkey
+                                                 0x05 - TLB
+                                                 0x06 - port0
+                                                 0x07 - port1 */
+    pseudo_bit_t       reserved0[0x00018];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00040];
+/* -------------- */
+}; 
+
+/* Event_data Field - Page Faults */
+
+struct hermonprm_page_fault_event_data_st {    /* Little Endian */
+    pseudo_bit_t       va_h[0x00020];         /* Virtual Address[63:32] this page fault is reported on */
+/* -------------- */
+    pseudo_bit_t       va_l[0x00020];         /* Virtual Address[31:0] this page fault is reported on */
+/* -------------- */
+    pseudo_bit_t       mem_key[0x00020];      /* Memory Key this page fault is reported on */
+/* -------------- */
+    pseudo_bit_t       qp[0x00018];           /* QP this page fault is reported on */
+    pseudo_bit_t       reserved0[0x00003];
+    pseudo_bit_t       a[0x00001];            /* If set the memory access that caused the page fault was atomic */
+    pseudo_bit_t       lw[0x00001];           /* If set the memory access that caused the page fault was local write */
+    pseudo_bit_t       lr[0x00001];           /* If set the memory access that caused the page fault was local read */
+    pseudo_bit_t       rw[0x00001];           /* If set the memory access that caused the page fault was remote write */
+    pseudo_bit_t       rr[0x00001];           /* If set the memory access that caused the page fault was remote read */
+/* -------------- */
+    pseudo_bit_t       pd[0x00018];           /* PD this page fault is reported on */
+    pseudo_bit_t       reserved1[0x00008];
+/* -------------- */
+    pseudo_bit_t       prefetch_len[0x00020]; /* Indicates how many subsequent pages in the same memory region/window will be accessed by the following transaction after this page fault is resolved, measured in bytes. SW can use this information in order to page-in the subsequent pages if they are not present. */
+/* -------------- */
+}; 
+
+/* WQE segments format */
+
+struct hermonprm_wqe_segment_st {      /* Little Endian */
+    struct hermonprm_send_wqe_segment_st       send_wqe_segment;/* Send WQE segment format */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00280];
+/* -------------- */
+    struct hermonprm_wqe_segment_ctrl_mlx_st   mlx_wqe_segment_ctrl;/* MLX WQE segment format */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00100];
+/* -------------- */
+    pseudo_bit_t       recv_wqe_segment_ctrl[4][0x00020];/* Receive segment format (4 dwords) */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00080];
+/* -------------- */
+}; 
+
+/* Event_data Field - Port State Change   #### michal - match PRM */
+
+struct hermonprm_port_state_change_st {        /* Little Endian */
+    pseudo_bit_t       reserved0[0x00040];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x0001c];
+    pseudo_bit_t       p[0x00002];            /* Port number (1 or 2), bits [29:28] of this dword */
+    pseudo_bit_t       reserved2[0x00002];
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00060];
+/* -------------- */
+}; 
+
+/* Event_data Field - Completion Queue Error     #### michal - match PRM */
+
+struct hermonprm_completion_queue_error_st {   /* Little Endian */
+    pseudo_bit_t       cqn[0x00018];          /* CQ number the event is reported for */
+    pseudo_bit_t       reserved0[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00020];
+/* -------------- */
+    pseudo_bit_t       syndrome[0x00008];     /* Error syndrome:
+                                                 0x01 - CQ overrun
+                                                 0x02 - CQ access violation error */
+    pseudo_bit_t       reserved2[0x00018];
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00060];
+/* -------------- */
+}; 
+
+/* Event_data Field - Completion Event #### michal - match PRM */
+
+struct hermonprm_completion_event_st { /* Little Endian */
+    pseudo_bit_t       cqn[0x00018];          /* CQ number the event is reported for */
+    pseudo_bit_t       reserved0[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved1[0x000a0];    /* remainder of the event_data field is reserved */
+/* -------------- */
+}; 
+
+/* Event Queue Entry         #### michal - match to PRM */
+
+struct hermonprm_event_queue_entry_st {        /* Little Endian */
+    pseudo_bit_t       event_sub_type[0x00008];/* Event Sub Type. 
+                                                 Defined for events which have sub types, zero elsewhere. */
+    pseudo_bit_t       reserved0[0x00008];
+    pseudo_bit_t       event_type[0x00008];   /* Event Type */
+    pseudo_bit_t       reserved1[0x00008];
+/* -------------- */
+    pseudo_bit_t       event_data[6][0x00020];/* Delivers auxiliary data to handle event. */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00007];
+    pseudo_bit_t       owner[0x00001];        /* Owner of the entry 
+                                                 0 SW 
+                                                 1 HW */
+    pseudo_bit_t       reserved3[0x00018];
+/* -------------- */
+}; 
+
+/* QP/EE State Transitions Command Parameters  ###michal - doesn't match PRM (field name changed) */
+
+struct hermonprm_qp_ee_state_transitions_st {  /* Little Endian */
+    pseudo_bit_t       opt_param_mask[0x00020];/* This field defines which optional parameters are passed. Each bit specifies whether optional parameter is passed (set) or not (cleared). The opt_param_mask is defined for each QP/EE command. */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x00020];
+/* -------------- */
+    struct hermonprm_queue_pair_ee_context_entry_st    qpc_eec_data;/* QPC/EEC data  ###michal - field has replaced with "qpc_data" (size .1948) */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x00800];
+/* -------------- */
+}; 
+
+/* Completion Queue Entry Format        #### michal - fixed by gdror */
+
+struct hermonprm_completion_queue_entry_st {   /* Little Endian */
+    pseudo_bit_t       qpn[0x00018];          /* Indicates the QP for which completion is being reported */
+    pseudo_bit_t       reserved0[0x00002];
+    pseudo_bit_t       d2s[0x00001];          /* Duplicate to Sniffer. This bit is set if both Send and Receive queues are subject for sniffer queue. The HW delivers
+                                                 packet only to send-associated sniffer receive queue. */
+    pseudo_bit_t       fcrc_sd[0x00001];      /* FCRC: If set, FC CRC is correct in FC frame encapsulated in payload. Valid for Raw Frame FC receive queue only.
+                                                 SD: CQ associated with Sniffer receive queue. If set, packets were skipped due to lack of receive buffers on the Sniffer receive queue */
+    pseudo_bit_t       fl[0x00001];           /* Force Loopback Valid for responder RawEth and UD only. */
+    pseudo_bit_t       vlan[0x00002];         /* Valid for RawEth and UD over Ethernet only. Applicable for RawEth and UD over Ethernet Receive queue
+                                                  00 - No VLAN header was present in the packet
+                                                 01 - C-VLAN (802.1q) Header was present in the frame.
+                                                 10 - S-VLAN (802.1ad) Header was present in the frame. */
+    pseudo_bit_t       dife[0x00001];         /* DIF Error */
+/* -------------- */
+    pseudo_bit_t       immediate_rssvalue_invalidatekey[0x00020];/* For a responder CQE, if completed WQE Opcode is Send With Immediate or Write With Immediate, this field contains immediate field of the received message.
+                                                 For a responder CQE, if completed WQE Opcode is Send With Invalidate, this field contains the R_key that was invalidated.
+                                                 For a responder CQE of a GSI packet this field contains the Pkey Index of the packet.
+                                                 For IPoIB (UD) and RawEth CQEs this field contains the RSS hash function value.
+                                                 Otherwise, this field is reserved. */
+/* -------------- */
+    pseudo_bit_t       srq_rqpn[0x00018];     /* For Responder UD QPs, Remote (source) QP number. 
+                                                 For Responder SRC QPs, SRQ number.
+                                                 Otherwise, this field is reserved. */
+    pseudo_bit_t       ml_path_mac_index[0x00007];/* For responder UD over IB CQE: These are the lower LMC bits of the DLID in an incoming UD packet, higher bits of this field, that are not part of the LMC bits are zeroed by HW. Invalid if incoming message DLID is the permissive LID or incoming message is multicast.
+                                                  For responder UD over Ethernet and RawEth CQEs: Index of the MAC Table entry that the packet DMAC was matched against.
+                                                  Otherwise, this field is reserved. */
+    pseudo_bit_t       g[0x00001];            /* For responder UD over IB CQE this bit indicates the presence of a GRH
+                                                 For responder UD over Ethernet CQE this bit is set if IPv6 L3 header was present in the packet, this bit is cleared if IPv4 L3 Header was present in the packet.
+                                                 Otherwise, this field is reserved. */
+/* -------------- */
+    pseudo_bit_t       slid_smac47_32[0x00010];/* For responder UD over IB CQE it is the source LID of the packet.
+                                                 For responder UD over Ethernet and RawEth CQEs it is the source-MAC[47:32] of the packet.
+                                                 Otherwise, this field is reserved. */
+    pseudo_bit_t       vid[0x0000c];          /* Frame VID, valid for Responder Raw Ethernet and UD over Ethernet QP. Otherwise, this field is reserved. */
+    pseudo_bit_t       sl[0x00004];           /* For responder UD over IB - the Service Level of the packet.
+                                                  For responder UD over Ethernet and RawEth - it is VLAN-header[15:12]
+                                                  Otherwise, this field is reserved. */
+/* -------------- */
+    pseudo_bit_t       smac31_0_rawether_ipoib_status[0x00020];/* For responder UD over Ethernet - source MAC[31:0] of the packet. 
+                                                  For responder RawEth and UD over IB - RawEth-IPoIB status {3 reserved, ipok,udp,tcp,ipv4opt,ipv6,ipv4vf,ipv4,rht(6),ipv6extmask(6),reserved(2),l2am,reserved(2),bfcs,reserved(2),enc} 
+                                                  Otherwise, this field is reserved. */
+/* -------------- */
+    pseudo_bit_t       byte_cnt[0x00020];     /* Byte count of data transferred. Applicable for RDMA-read, Atomic and all receive operations' completions. 
+                                                 For Receive Queue that is subject for headers separation, byte_cnt[31:24] specify number of bytes scattered to the first scatter entry (headers length). Byte_cnt[23:0] specify total byte count received (including headers). */
+/* -------------- */
+    pseudo_bit_t       checksum[0x00010];     /* Valid for RawEth and IPoIB only. */
+    pseudo_bit_t       wqe_counter[0x00010];
+/* -------------- */
+    pseudo_bit_t       opcode[0x00005];       /* Send completions - same encoding as WQE. 
+                                                  Error coding is 0x1F
+                                                  Receive:
+                                                  0x0 - RDMA-Write with Immediate
+                                                  0x1 - Send
+                                                  0x2 - Send with Immediate
+                                                  0x3 - Send & Invalidate
+                                                  */
+    pseudo_bit_t       is[0x00001];           /* inline scatter */
+    pseudo_bit_t       s_r[0x00001];          /* send 1 / receive 0 */
+    pseudo_bit_t       owner[0x00001];        /* HW Flips this bit for every CQ wrap around. Initialized to Zero. */
+    pseudo_bit_t       reserved1[0x00010];
+    pseudo_bit_t       reserved2[0x00008];
+/* -------------- */
+}; 
+
+/* Multicast group member QPs table */
+
+struct hermonprm_mcg_qps_st {  /* Little Endian */
+    struct hermonprm_mcg_qp_dw_st      dw[128];/* 128 MCG member QP dwords */
+/* -------------- */
+}; 
+
+/* Multicast group entry header */
+
+struct hermonprm_mcg_hdr_st {  /* Little Endian */
+    pseudo_bit_t       reserved0[0x00006];
+    pseudo_bit_t       next_mcg[0x0001a];     /* Next MCG entry (presumably hash-collision chain index - verify against PRM) */
+/* -------------- */
+    pseudo_bit_t       members_count[0x00018];/* Number of members in this multicast group */
+    pseudo_bit_t       reserved1[0x00008];
+/* -------------- */
+    pseudo_bit_t       reserved2[0x00020];
+/* -------------- */
+    pseudo_bit_t       reserved3[0x00020];
+/* -------------- */
+    pseudo_bit_t       gid3[0x00020];         /* Multicast GID, word 3 */
+/* -------------- */
+    pseudo_bit_t       gid2[0x00020];         /* Multicast GID, word 2 */
+/* -------------- */
+    pseudo_bit_t       gid1[0x00020];         /* Multicast GID, word 1 */
+/* -------------- */
+    pseudo_bit_t       gid0[0x00020];         /* Multicast GID, word 0 */
+/* -------------- */
+}; 
+
+/* Schedule queue context */
+
+struct hermonprm_sched_queue_context_st {      /* Little Endian */
+    pseudo_bit_t       policy[0x00003];       /* Schedule Queue Policy - 0 - LLSQ, 1 - GBSQ, 2 - BESQ */
+    pseudo_bit_t       vl15[0x00001];         /* If set, this schedule queue serves VL15 traffic (sl field ignored) */
+    pseudo_bit_t       sl[0x00004];           /* SL this Schedule Queue is associated with (if vl15 bit is 0) */
+    pseudo_bit_t       port[0x00002];         /* Port this Schedule Queue is associated with */
+    pseudo_bit_t       reserved0[0x00006];
+    pseudo_bit_t       weight[0x00010];       /* Weight of this schedule queue */
+/* -------------- */
+}; 
+
+/* ECC detect event data */
+
+struct hermonprm_ecc_detect_event_data_st {    /* Little Endian */
+    pseudo_bit_t       reserved0[0x00080];
+/* -------------- */
+    pseudo_bit_t       cause_lsb[0x00001];    /* ECC error in the 64bit lsb data (same layout as hermonprm_scrubbing_event_st) */
+    pseudo_bit_t       reserved1[0x00002];
+    pseudo_bit_t       cause_msb[0x00001];    /* ECC error in the 64bit msb data */
+    pseudo_bit_t       reserved2[0x00002];
+    pseudo_bit_t       err_rmw[0x00001];      /* transaction type: 0 - read, 1 - read/modify/write */
+    pseudo_bit_t       err_src_id[0x00003];   /* source of the transaction: 0x4 - PCI, other - internal or IB */
+    pseudo_bit_t       err_da[0x00002];       /* Error DIMM address */
+    pseudo_bit_t       err_ba[0x00002];       /* Error bank address */
+    pseudo_bit_t       reserved3[0x00011];
+    pseudo_bit_t       overflow[0x00001];     /* ECC error FIFO overflow */
+/* -------------- */
+    pseudo_bit_t       err_ra[0x00010];       /* Error row address */
+    pseudo_bit_t       err_ca[0x00010];       /* Error column address */
+/* -------------- */
+}; 
+
+/* Event_data Field - ECC Detection Event */
+
+struct hermonprm_scrubbing_event_st {  /* Little Endian */
+    pseudo_bit_t       reserved0[0x00080];
+/* -------------- */
+    pseudo_bit_t       cause_lsb[0x00001];    /* data integrity error cause:
+                                                 single ECC error in the 64bit lsb data, on the rising edge of the clock */
+    pseudo_bit_t       reserved1[0x00002];
+    pseudo_bit_t       cause_msb[0x00001];    /* data integrity error cause:
+                                                 single ECC error in the 64bit msb data, on the falling edge of the clock */
+    pseudo_bit_t       reserved2[0x00002];
+    pseudo_bit_t       err_rmw[0x00001];      /* transaction type:
+                                                 0 - read
+                                                 1 - read/modify/write */
+    pseudo_bit_t       err_src_id[0x00003];   /* source of the transaction: 0x4 - PCI, other - internal or IB */
+    pseudo_bit_t       err_da[0x00002];       /* Error DIMM address */
+    pseudo_bit_t       err_ba[0x00002];       /* Error bank address */
+    pseudo_bit_t       reserved3[0x00011];
+    pseudo_bit_t       overflow[0x00001];     /* Fatal: ECC error FIFO overflow - ECC errors were detected, which may or may not have been corrected by InfiniHost-III-EX */
+/* -------------- */
+    pseudo_bit_t       err_ra[0x00010];       /* Error row address */
+    pseudo_bit_t       err_ca[0x00010];       /* Error column address */
+/* -------------- */
+}; 
+
+/* EQ command doorbell */
+
+struct hermonprm_eq_cmd_doorbell_st {  /* Little Endian */
+    pseudo_bit_t       reserved0[0x00020];    /* entire doorbell dword is reserved */
+/* -------------- */
+}; 
+
+/* Top-level container mapping all PRM structures (offset 0) */
+
+struct hermonprm_hermon_prm_st {       /* Little Endian */
+    struct hermonprm_completion_queue_entry_st completion_queue_entry;/* Completion Queue Entry Format */
+/* -------------- */
+    pseudo_bit_t       reserved0[0x7ff00];    /* pads each member region to 0x80000 bits */
+/* -------------- */
+    struct hermonprm_qp_ee_state_transitions_st        qp_ee_state_transitions;/* QP/EE State Transitions Command Parameters */
+/* -------------- */
+    pseudo_bit_t       reserved1[0x7f000];
+/* -------------- */
+    struct hermonprm_event_queue_entry_st      event_queue_entry;/* Event Queue Entry */
+/* -------------- */
+    pseudo_bit_t       reserved2[0x7ff00];
+/* -------------- */
+    struct hermonprm_completion_event_st       completion_event;/* Event_data Field - Completion Event */
+/* -------------- */
+    pseudo_bit_t       reserved3[0x7ff40];
+/* -------------- */
+    struct hermonprm_completion_queue_error_st completion_queue_error;/* Event_data Field - Completion Queue Error */
+/* -------------- */
+    pseudo_bit_t       reserved4[0x7ff40];
+/* -------------- */
+    struct hermonprm_port_state_change_st      port_state_change;/* Event_data Field - Port State Change */
+/* -------------- */
+    pseudo_bit_t       reserved5[0x7ff40];
+/* -------------- */
+    struct hermonprm_wqe_segment_st    wqe_segment;/* WQE segments format */
+/* -------------- */
+    pseudo_bit_t       reserved6[0x7f000];
+/* -------------- */
+    struct hermonprm_page_fault_event_data_st  page_fault_event_data;/* Event_data Field - Page Faults */
+/* -------------- */
+    pseudo_bit_t       reserved7[0x7ff40];
+/* -------------- */
+    struct hermonprm_performance_monitor_event_st      performance_monitor_event;/* Event Data Field - Performance Monitor */
+/* -------------- */
+    pseudo_bit_t       reserved8[0xfff20];
+/* -------------- */
+    struct hermonprm_mt25208_type0_st  mt25208_type0;/* InfiniHost-III-EX Type0 Configuration Header */
+/* -------------- */
+    pseudo_bit_t       reserved9[0x7f000];
+/* -------------- */
+    struct hermonprm_qp_ee_event_st    qp_ee_event;/* Event_data Field - QP/EE Events */
+/* -------------- */
+    pseudo_bit_t       reserved10[0x00040];
+/* -------------- */
+    struct hermonprm_gpio_event_data_st        gpio_event_data;
+/* -------------- */
+    pseudo_bit_t       reserved11[0x7fe40];
+/* -------------- */
+    struct hermonprm_ud_address_vector_st      ud_address_vector;/* UD Address Vector */
+/* -------------- */
+    pseudo_bit_t       reserved12[0x7ff00];
+/* -------------- */
+    struct hermonprm_queue_pair_ee_context_entry_st    queue_pair_ee_context_entry;/* QP and EE Context Entry */
+/* -------------- */
+    pseudo_bit_t       reserved13[0x7f840];
+/* -------------- */
+    struct hermonprm_address_path_st   address_path;/* Address Path */
+/* -------------- */
+    pseudo_bit_t       reserved14[0x7fea0];
+/* -------------- */
+    struct hermonprm_completion_queue_context_st       completion_queue_context;/* Completion Queue Context Table Entry */
+/* -------------- */
+    pseudo_bit_t       reserved15[0x7fe00];
+/* -------------- */
+    struct hermonprm_mpt_st    mpt;         /* Memory Protection Table (MPT) Entry */
+/* -------------- */
+    pseudo_bit_t       reserved16[0x7fe00];
+/* -------------- */
+    struct hermonprm_mtt_st    mtt;         /* Memory Translation Table (MTT) Entry */
+/* -------------- */
+    pseudo_bit_t       reserved17[0x7ffc0];
+/* -------------- */
+    struct hermonprm_eqc_st    eqc;         /* Event Queue Context Table Entry */
+/* -------------- */
+    pseudo_bit_t       reserved18[0x7fe00];
+/* -------------- */
+    struct hermonprm_performance_monitors_st   performance_monitors;/* Performance Monitors */
+/* -------------- */
+    pseudo_bit_t       reserved19[0x7ff80];
+/* -------------- */
+    struct hermonprm_hca_command_register_st   hca_command_register;/* HCA Command Register (HCR) */
+/* -------------- */
+    pseudo_bit_t       reserved20[0xfff20];
+/* -------------- */
+    struct hermonprm_init_hca_st       init_hca;/* INIT_HCA & QUERY_HCA Parameters Block */
+/* -------------- */
+    pseudo_bit_t       reserved21[0x7f000];
+/* -------------- */
+    struct hermonprm_qpcbaseaddr_st    qpcbaseaddr;/* QPC/EEC/CQC/EQC/RDB Parameters */
+/* -------------- */
+    pseudo_bit_t       reserved22[0x7fc00];
+/* -------------- */
+    struct hermonprm_udavtable_memory_parameters_st    udavtable_memory_parameters;/* Memory Access Parameters for UD Address Vector Table */
+/* -------------- */
+    pseudo_bit_t       reserved23[0x7ffc0];
+/* -------------- */
+    struct hermonprm_multicastparam_st multicastparam;/* Multicast Support Parameters */
+/* -------------- */
+    pseudo_bit_t       reserved24[0x7ff00];
+/* -------------- */
+    struct hermonprm_tptparams_st      tptparams;/* Translation and Protection Tables Parameters */
+/* -------------- */
+    pseudo_bit_t       reserved25[0x7ff00];
+/* -------------- */
+    pseudo_bit_t       reserved26[0x00800];
+/* -------------- */
+    pseudo_bit_t       reserved27[0x00100];
+/* -------------- */
+    pseudo_bit_t       reserved28[0x7f700];
+/* -------------- */
+    pseudo_bit_t       reserved29[0x00100];
+/* -------------- */
+    pseudo_bit_t       reserved30[0x7ff00];
+/* -------------- */
+    struct hermonprm_query_fw_st       query_fw;/* QUERY_FW Parameters Block */
+/* -------------- */
+    pseudo_bit_t       reserved31[0x7f800];
+/* -------------- */
+    struct hermonprm_query_adapter_st  query_adapter;/* QUERY_ADAPTER Parameters Block */
+/* -------------- */
+    pseudo_bit_t       reserved32[0x7f800];
+/* -------------- */
+    struct hermonprm_query_dev_cap_st  query_dev_cap;/* Query Device Limitations */
+/* -------------- */
+    pseudo_bit_t       reserved33[0x7f800];
+/* -------------- */
+    struct hermonprm_uar_params_st     uar_params;/* UAR Parameters */
+/* -------------- */
+    pseudo_bit_t       reserved34[0x7ff00];
+/* -------------- */
+    struct hermonprm_init_port_st      init_port;/* INIT_PORT Parameters */
+/* -------------- */
+    pseudo_bit_t       reserved35[0x7f800];
+/* -------------- */
+    struct hermonprm_mgm_entry_st      mgm_entry;/* Multicast Group Member */
+/* -------------- */
+    pseudo_bit_t       reserved36[0x7fe00];
+/* -------------- */
+    struct hermonprm_set_ib_st set_ib;   /* SET_IB Parameters */
+/* -------------- */
+    pseudo_bit_t       reserved37[0x7fe00];
+/* -------------- */
+    struct hermonprm_rd_send_doorbell_st       rd_send_doorbell;/* RD-send doorbell */
+/* -------------- */
+    pseudo_bit_t       reserved38[0x7ff80];
+/* -------------- */
+    struct hermonprm_send_doorbell_st  send_doorbell;/* Send doorbell */
+/* -------------- */
+    pseudo_bit_t       reserved39[0x7ffc0];
+/* -------------- */
+    struct hermonprm_receive_doorbell_st       receive_doorbell;/* Receive doorbell */
+/* -------------- */
+    pseudo_bit_t       reserved40[0x7ffc0];
+/* -------------- */
+    struct hermonprm_cq_cmd_doorbell_st        cq_cmd_doorbell;/* CQ Doorbell */
+/* -------------- */
+    pseudo_bit_t       reserved41[0xfffc0];
+/* -------------- */
+    struct hermonprm_uar_st    uar;         /* User Access Region */
+/* -------------- */
+    pseudo_bit_t       reserved42[0x7c000];
+/* -------------- */
+    struct hermonprm_mgmqp_st  mgmqp;     /* Multicast Group Member QP */
+/* -------------- */
+    pseudo_bit_t       reserved43[0x7ffe0];
+/* -------------- */
+    struct hermonprm_query_debug_msg_st        query_debug_msg;/* Query Debug Message */
+/* -------------- */
+    pseudo_bit_t       reserved44[0x7f800];
+/* -------------- */
+    struct hermonprm_mad_ifc_st        mad_ifc; /* MAD_IFC Input Mailbox */
+/* -------------- */
+    pseudo_bit_t       reserved45[0x00900];
+/* -------------- */
+    struct hermonprm_mad_ifc_input_modifier_st mad_ifc_input_modifier;/* MAD_IFC Input Modifier */
+/* -------------- */
+    pseudo_bit_t       reserved46[0x7e6e0];
+/* -------------- */
+    struct hermonprm_resize_cq_st      resize_cq;/* Resize CQ Input Mailbox */
+/* -------------- */
+    pseudo_bit_t       reserved47[0x7fe00];
+/* -------------- */
+    struct hermonprm_completion_with_error_st  completion_with_error;/* Completion with Error CQE */
+/* -------------- */
+    pseudo_bit_t       reserved48[0x7ff00];
+/* -------------- */
+    struct hermonprm_hcr_completion_event_st   hcr_completion_event;/* Event_data Field - HCR Completion Event */
+/* -------------- */
+    pseudo_bit_t       reserved49[0x7ff40];
+/* -------------- */
+    struct hermonprm_transport_and_ci_error_counters_st        transport_and_ci_error_counters;/* Transport and CI Error Counters */
+/* -------------- */
+    pseudo_bit_t       reserved50[0x7f000];
+/* -------------- */
+    struct hermonprm_performance_counters_st   performance_counters;/* Performance Counters */
+/* -------------- */
+    pseudo_bit_t       reserved51[0x9ff800];
+/* -------------- */
+    struct hermonprm_fast_registration_segment_st      fast_registration_segment;/* Fast Registration Segment */
+/* -------------- */
+    pseudo_bit_t       reserved52[0x7ff00];
+/* -------------- */
+    struct hermonprm_pbl_st    pbl;         /* Physical Buffer List */
+/* -------------- */
+    pseudo_bit_t       reserved53[0x7ff00];
+/* -------------- */
+    struct hermonprm_srq_context_st    srq_context;/* SRQ Context */
+/* -------------- */
+    pseudo_bit_t       reserved54[0x7fe80];
+/* -------------- */
+    struct hermonprm_mod_stat_cfg_st   mod_stat_cfg;/* MOD_STAT_CFG */
+/* -------------- */
+    pseudo_bit_t       reserved55[0x7f800];
+/* -------------- */
+    struct hermonprm_virtual_physical_mapping_st       virtual_physical_mapping;/* Virtual and Physical Mapping */
+/* -------------- */
+    pseudo_bit_t       reserved56[0x7ff80];
+/* -------------- */
+    struct hermonprm_cq_ci_db_record_st        cq_ci_db_record;/* CQ_CI_DB_Record */
+/* -------------- */
+    pseudo_bit_t       reserved57[0x7ffc0];
+/* -------------- */
+    struct hermonprm_cq_arm_db_record_st       cq_arm_db_record;/* CQ_ARM_DB_Record */
+/* -------------- */
+    pseudo_bit_t       reserved58[0x7ffc0];
+/* -------------- */
+    struct hermonprm_qp_db_record_st   qp_db_record;/* QP_DB_Record */
+/* -------------- */
+    pseudo_bit_t       reserved59[0x00020];
+/* -------------- */
+    pseudo_bit_t       reserved60[0x1fffc0];
+/* -------------- */
+    struct hermonprm_configuration_registers_st        configuration_registers;/* InfiniHost III EX Configuration Registers */
+/* -------------- */
+    struct hermonprm_eq_set_ci_table_st        eq_set_ci_table;/* EQ Set CI DBs Table */
+/* -------------- */
+    pseudo_bit_t       reserved61[0x01000];
+/* -------------- */
+    pseudo_bit_t       reserved62[0x00040];
+/* -------------- */
+    pseudo_bit_t       reserved63[0x00fc0];
+/* -------------- */
+    struct hermonprm_clr_int_st        clr_int; /* Clear Interrupt Register */
+/* -------------- */
+    pseudo_bit_t       reserved64[0xffcfc0];
+/* -------------- */
+}; 
+#endif /* H_prefix_hermonprm_bits_fixnames_MT25408_PRM_csp_H */
diff --git a/src/drivers/infiniband/hermon.c b/src/drivers/infiniband/hermon.c
new file mode 100644 (file)
index 0000000..f808a32
--- /dev/null
@@ -0,0 +1,2168 @@
+/*
+ * Copyright (C) 2007 Michael Brown <mbrown@fensystems.co.uk>.
+ *
+ * Based in part upon the original driver by Mellanox Technologies
+ * Ltd.  Portions may be Copyright (c) Mellanox Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+#include <errno.h>
+#include <timer.h>
+#include <byteswap.h>
+#include <gpxe/pci.h>
+#include <gpxe/malloc.h>
+#include <gpxe/umalloc.h>
+#include <gpxe/iobuf.h>
+#include <gpxe/netdevice.h>
+#include <gpxe/infiniband.h>
+#include <gpxe/ipoib.h>
+#include "hermon.h"
+
+/**
+ * @file
+ *
+ * Mellanox Hermon Infiniband HCA
+ *
+ */
+
+/* Port to use */
+#define PXE_IB_PORT 1
+
+/***************************************************************************
+ *
+ * Queue number allocation
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Allocate offsets within usage bitmask
+ *
+ * @v bits             Usage bitmask
+ * @v bits_len         Length of usage bitmask
+ * @v num_bits         Number of contiguous bits to allocate within bitmask
+ * @ret bit            First free bit within bitmask, or negative error
+ */
+static int hermon_bitmask_alloc ( hermon_bitmask_t *bits,
+                                 unsigned int bits_len,
+                                 unsigned int num_bits ) {
+       unsigned int bit = 0;
+       hermon_bitmask_t mask = 1;
+       unsigned int found = 0;
+
+       /* Search bits for num_bits contiguous free bits.  The mask is
+        * rotated left by one position per bit examined; when it wraps
+        * back to 1 we have crossed a word boundary and advance to the
+        * next word of the bitmask.
+        */
+       while ( bit < bits_len ) {
+               if ( ( mask & *bits ) == 0 ) {
+                       if ( ++found == num_bits )
+                               goto found;
+               } else {
+                       /* Run of free bits broken; restart the count */
+                       found = 0;
+               }
+               bit++;
+               mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
+               if ( mask == 1 )
+                       bits++;
+       }
+       return -ENFILE;
+
+ found:
+       /* Mark bits as in-use, walking backwards from the final bit of
+        * the run ("mask" and "bits" still reference that final bit at
+        * this point; the rotate-right mirrors the search loop).
+        */
+       do {
+               *bits |= mask;
+               if ( mask == 1 )
+                       bits--;
+               mask = ( mask >> 1 ) | ( mask << ( 8 * sizeof ( mask ) - 1 ) );
+       } while ( --found );
+
+       /* "bit" is the index of the last bit of the run; return the
+        * index of the first bit.
+        */
+       return ( bit - num_bits + 1 );
+}
+
+/**
+ * Free offsets within usage bitmask
+ *
+ * @v bits             Usage bitmask
+ * @v bit              Starting bit within bitmask
+ * @v num_bits         Number of contiguous bits to free within bitmask
+ */
+static void hermon_bitmask_free ( hermon_bitmask_t *bits,
+                                 int bit, unsigned int num_bits ) {
+       unsigned int bits_per_word = ( 8 * sizeof ( hermon_bitmask_t ) );
+
+       /* Clear each bit of the run in turn */
+       while ( num_bits-- ) {
+               bits[ bit / bits_per_word ] &=
+                       ~( ( hermon_bitmask_t ) 1 << ( bit % bits_per_word ) );
+               bit++;
+       }
+}
+
+/***************************************************************************
+ *
+ * HCA commands
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Wait for Hermon command completion
+ *
+ * @v hermon           Hermon device
+ * @v hcr              HCA command registers
+ * @ret rc             Return status code
+ */
+static int hermon_cmd_wait ( struct hermon *hermon,
+                            struct hermonprm_hca_command_register *hcr ) {
+       unsigned int wait;
+
+       /* Poll (once per millisecond, up to HERMON_HCR_MAX_WAIT_MS)
+        * until the "go" bit is clear and the toggle bit matches the
+        * value we last wrote, i.e. our command has completed.  Only
+        * dword 6 of the HCR (which holds both bits) is re-read.
+        */
+       for ( wait = HERMON_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
+               hcr->u.dwords[6] =
+                       readl ( hermon->config + HERMON_HCR_REG ( 6 ) );
+               if ( ( MLX_GET ( hcr, go ) == 0 ) &&
+                    ( MLX_GET ( hcr, t ) == hermon->toggle ) )
+                       return 0;
+               mdelay ( 1 );
+       }
+       return -EBUSY;
+}
+
+/**
+ * Issue HCA command
+ *
+ * @v hermon           Hermon device
+ * @v command          Command opcode, flags and input/output lengths
+ * @v op_mod           Opcode modifier (0 if no modifier applicable)
+ * @v in               Input parameters
+ * @v in_mod           Input modifier (0 if no modifier applicable)
+ * @v out              Output parameters
+ * @ret rc             Return status code
+ */
+static int hermon_cmd ( struct hermon *hermon, unsigned long command,
+                       unsigned int op_mod, const void *in,
+                       unsigned int in_mod, void *out ) {
+       struct hermonprm_hca_command_register hcr;
+       unsigned int opcode = HERMON_HCR_OPCODE ( command );
+       size_t in_len = HERMON_HCR_IN_LEN ( command );
+       size_t out_len = HERMON_HCR_OUT_LEN ( command );
+       void *in_buffer;
+       void *out_buffer;
+       unsigned int status;
+       unsigned int i;
+       int rc;
+
+       assert ( in_len <= HERMON_MBOX_SIZE );
+       assert ( out_len <= HERMON_MBOX_SIZE );
+
+       DBGC2 ( hermon, "Hermon %p command %02x in %zx%s out %zx%s\n",
+               hermon, opcode, in_len,
+               ( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
+               ( ( command & HERMON_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );
+
+       /* Check that HCR is free */
+       if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p command interface locked\n",
+                      hermon );
+               return rc;
+       }
+
+       /* Flip HCR toggle.  hermon_cmd_wait() treats a completion as
+        * ours only when the hardware's toggle bit matches this value,
+        * so it must be flipped before the command is written.
+        */
+       hermon->toggle = ( 1 - hermon->toggle );
+
+       /* Prepare HCR.  Small input parameters are passed "immediate"
+        * in HCR dwords 0-1; larger ones go via the input mailbox,
+        * whose bus address is placed in the in_param field.
+        */
+       memset ( &hcr, 0, sizeof ( hcr ) );
+       in_buffer = &hcr.u.dwords[0];
+       if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
+               in_buffer = hermon->mailbox_in;
+               MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
+       }
+       memcpy ( in_buffer, in, in_len );
+       MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
+       /* Immediate output lands in HCR dwords 3-4; mailbox output
+        * goes to mailbox_out, whose bus address is set here.
+        */
+       out_buffer = &hcr.u.dwords[3];
+       if ( out_len && ( command & HERMON_HCR_OUT_MBOX ) ) {
+               out_buffer = hermon->mailbox_out;
+               MLX_FILL_1 ( &hcr, 4, out_param_l,
+                            virt_to_bus ( out_buffer ) );
+       }
+       MLX_FILL_4 ( &hcr, 6,
+                    opcode, opcode,
+                    opcode_modifier, op_mod,
+                    go, 1,
+                    t, hermon->toggle );
+       DBGC ( hermon, "Hermon %p issuing command:\n", hermon );
+       DBGC_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
+                  &hcr, sizeof ( hcr ) );
+       if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
+               DBGC2 ( hermon, "Input mailbox:\n" );
+               DBGC2_HDA ( hermon, virt_to_phys ( in_buffer ), in_buffer,
+                           ( ( in_len < 512 ) ? in_len : 512 ) );
+       }
+
+       /* Issue command.  Dwords are written in ascending order with a
+        * barrier after each, so dword 6 (containing the "go" bit that
+        * starts execution) is guaranteed to reach the device last.
+        */
+       for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
+             i++ ) {
+               writel ( hcr.u.dwords[i],
+                        hermon->config + HERMON_HCR_REG ( i ) );
+               barrier();
+       }
+
+       /* Wait for command completion */
+       if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p timed out waiting for command:\n",
+                      hermon );
+               DBGC_HDA ( hermon,
+                          virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
+                          &hcr, sizeof ( hcr ) );
+               return rc;
+       }
+
+       /* Check command status (from dword 6, refreshed by
+        * hermon_cmd_wait() above).
+        */
+       status = MLX_GET ( &hcr, status );
+       if ( status != 0 ) {
+               DBGC ( hermon, "Hermon %p command failed with status %02x:\n",
+                      hermon, status );
+               DBGC_HDA ( hermon,
+                          virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
+                          &hcr, sizeof ( hcr ) );
+               return -EIO;
+       }
+
+       /* Read output parameters, if any.  For immediate output,
+        * out_buffer points at hcr.u.dwords[3], so dwords 3-4 must be
+        * re-read from the device before the memcpy.
+        */
+       hcr.u.dwords[3] = readl ( hermon->config + HERMON_HCR_REG ( 3 ) );
+       hcr.u.dwords[4] = readl ( hermon->config + HERMON_HCR_REG ( 4 ) );
+       memcpy ( out, out_buffer, out_len );
+       if ( out_len ) {
+               DBGC2 ( hermon, "Output%s:\n",
+                       ( command & HERMON_HCR_OUT_MBOX ) ? " mailbox" : "" );
+               DBGC2_HDA ( hermon, virt_to_phys ( out_buffer ), out_buffer,
+                           ( ( out_len < 512 ) ? out_len : 512 ) );
+       }
+
+       return 0;
+}
+
+/*
+ * Thin type-safe wrappers around hermon_cmd(), one per HCA command.
+ * Each encodes the command opcode, mailbox usage and parameter sizes
+ * via the HERMON_HCR_*_CMD() macros; the input modifier typically
+ * carries the object index (QPN, CQN, EQ/MPT/MCG index or port).
+ */
+static inline int
+hermon_cmd_query_dev_cap ( struct hermon *hermon,
+                          struct hermonprm_query_dev_cap *dev_cap ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_DEV_CAP,
+                                                1, sizeof ( *dev_cap ) ),
+                           0, NULL, 0, dev_cap );
+}
+
+static inline int
+hermon_cmd_query_fw ( struct hermon *hermon, struct hermonprm_query_fw *fw ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_FW,
+                                                1, sizeof ( *fw ) ),
+                           0, NULL, 0, fw );
+}
+
+static inline int
+hermon_cmd_init_hca ( struct hermon *hermon,
+                     const struct hermonprm_init_hca *init_hca ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_HCA,
+                                               1, sizeof ( *init_hca ) ),
+                           0, init_hca, 0, NULL );
+}
+
+static inline int
+hermon_cmd_close_hca ( struct hermon *hermon ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_HCA ),
+                           0, NULL, 0, NULL );
+}
+
+static inline int
+hermon_cmd_init_port ( struct hermon *hermon, unsigned int port,
+                      const struct hermonprm_init_port *init_port ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_PORT,
+                                               1, sizeof ( *init_port ) ),
+                           0, init_port, port, NULL );
+}
+
+static inline int
+hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_PORT ),
+                           0, NULL, port, NULL );
+}
+
+static inline int
+hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index,
+                      const struct hermonprm_mpt *mpt ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_MPT,
+                                               1, sizeof ( *mpt ) ),
+                           0, mpt, index, NULL );
+}
+
+static inline int
+hermon_cmd_write_mtt ( struct hermon *hermon,
+                      const struct hermonprm_write_mtt *write_mtt ) {
+       /* Input modifier 1 = number of MTT entries written per call */
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MTT,
+                                               1, sizeof ( *write_mtt ) ),
+                           0, write_mtt, 1, NULL );
+}
+
+static inline int
+hermon_cmd_sw2hw_eq ( struct hermon *hermon, unsigned int index,
+                     const struct hermonprm_eqc *eqc ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_EQ,
+                                               1, sizeof ( *eqc ) ),
+                           0, eqc, index, NULL );
+}
+
+static inline int
+hermon_cmd_hw2sw_eq ( struct hermon *hermon, unsigned int index ) {
+       /* NOTE(review): op_mod 1 here — assumed intentional per the
+        * PRM's HW2SW_EQ definition; verify against the Mellanox PRM.
+        */
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_VOID_CMD ( HERMON_HCR_HW2SW_EQ ),
+                           1, NULL, index, NULL );
+}
+
+static inline int
+hermon_cmd_sw2hw_cq ( struct hermon *hermon, unsigned long cqn,
+                     const struct hermonprm_completion_queue_context *cqctx ){
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_CQ,
+                                               1, sizeof ( *cqctx ) ),
+                           0, cqctx, cqn, NULL );
+}
+
+static inline int
+hermon_cmd_hw2sw_cq ( struct hermon *hermon, unsigned long cqn,
+                     struct hermonprm_completion_queue_context *cqctx) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_CQ,
+                                                1, sizeof ( *cqctx ) ),
+                           0, NULL, cqn, cqctx );
+}
+
+static inline int
+hermon_cmd_rst2init_qp ( struct hermon *hermon, unsigned long qpn,
+                        const struct hermonprm_qp_ee_state_transitions *ctx ){
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_RST2INIT_QP,
+                                               1, sizeof ( *ctx ) ),
+                           0, ctx, qpn, NULL );
+}
+
+static inline int
+hermon_cmd_init2rtr_qp ( struct hermon *hermon, unsigned long qpn,
+                        const struct hermonprm_qp_ee_state_transitions *ctx ){
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_INIT2RTR_QP,
+                                               1, sizeof ( *ctx ) ),
+                           0, ctx, qpn, NULL );
+}
+
+static inline int
+hermon_cmd_rtr2rts_qp ( struct hermon *hermon, unsigned long qpn,
+                       const struct hermonprm_qp_ee_state_transitions *ctx ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_RTR2RTS_QP,
+                                               1, sizeof ( *ctx ) ),
+                           0, ctx, qpn, NULL );
+}
+
+static inline int
+hermon_cmd_2rst_qp ( struct hermon *hermon, unsigned long qpn ) {
+       /* Force QP back to RESET from any state; op_mod 0x03 is
+        * assumed to match the PRM's 2RST_QP modifier — TODO confirm.
+        */
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_VOID_CMD ( HERMON_HCR_2RST_QP ),
+                           0x03, NULL, qpn, NULL );
+}
+
+static inline int
+hermon_cmd_mad_ifc ( struct hermon *hermon, union hermonprm_mad *mad ) {
+       /* The MAD is executed in place: the same buffer is used for
+        * both the request and the response.
+        */
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_INOUT_CMD ( HERMON_HCR_MAD_IFC,
+                                                  1, sizeof ( *mad ),
+                                                  1, sizeof ( *mad ) ),
+                           0x03, mad, PXE_IB_PORT, mad );
+}
+
+static inline int
+hermon_cmd_read_mcg ( struct hermon *hermon, unsigned int index,
+                     struct hermonprm_mcg_entry *mcg ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_OUT_CMD ( HERMON_HCR_READ_MCG,
+                                                1, sizeof ( *mcg ) ),
+                           0, NULL, index, mcg );
+}
+
+static inline int
+hermon_cmd_write_mcg ( struct hermon *hermon, unsigned int index,
+                      const struct hermonprm_mcg_entry *mcg ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MCG,
+                                               1, sizeof ( *mcg ) ),
+                           0, mcg, index, NULL );
+}
+
+static inline int
+hermon_cmd_mgid_hash ( struct hermon *hermon, const struct ib_gid *gid,
+                      struct hermonprm_mgm_hash *hash ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_INOUT_CMD ( HERMON_HCR_MGID_HASH,
+                                                  1, sizeof ( *gid ),
+                                                  0, sizeof ( *hash ) ),
+                           0, gid, 0, hash );
+}
+
+static inline int
+hermon_cmd_run_fw ( struct hermon *hermon ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_VOID_CMD ( HERMON_HCR_RUN_FW ),
+                           0, NULL, 0, NULL );
+}
+
+static inline int
+hermon_cmd_unmap_icm ( struct hermon *hermon, unsigned int page_count,
+                      const struct hermonprm_scalar_parameter *offset ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_UNMAP_ICM,
+                                               0, sizeof ( *offset ) ),
+                           0, offset, page_count, NULL );
+}
+
+static inline int
+hermon_cmd_map_icm ( struct hermon *hermon,
+                    const struct hermonprm_virtual_physical_mapping *map ) {
+       /* Input modifier 1 = number of mapping entries per call */
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM,
+                                               1, sizeof ( *map ) ),
+                           0, map, 1, NULL );
+}
+
+static inline int
+hermon_cmd_unmap_icm_aux ( struct hermon *hermon ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_ICM_AUX ),
+                           0, NULL, 0, NULL );
+}
+
+static inline int
+hermon_cmd_map_icm_aux ( struct hermon *hermon,
+                      const struct hermonprm_virtual_physical_mapping *map ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM_AUX,
+                                               1, sizeof ( *map ) ),
+                           0, map, 1, NULL );
+}
+
+static inline int
+hermon_cmd_set_icm_size ( struct hermon *hermon,
+                         const struct hermonprm_scalar_parameter *icm_size,
+                         struct hermonprm_scalar_parameter *icm_aux_size ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_INOUT_CMD ( HERMON_HCR_SET_ICM_SIZE,
+                                                  0, sizeof ( *icm_size ),
+                                                  0, sizeof (*icm_aux_size) ),
+                           0, icm_size, 0, icm_aux_size );
+}
+
+static inline int
+hermon_cmd_unmap_fa ( struct hermon *hermon ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_FA ),
+                           0, NULL, 0, NULL );
+}
+
+static inline int
+hermon_cmd_map_fa ( struct hermon *hermon,
+                   const struct hermonprm_virtual_physical_mapping *map ) {
+       return hermon_cmd ( hermon,
+                           HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_FA,
+                                               1, sizeof ( *map ) ),
+                           0, map, 1, NULL );
+}
+
+/***************************************************************************
+ *
+ * Memory translation table operations
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Allocate MTT entries
+ *
+ * @v hermon           Hermon device
+ * @v memory           Memory to map into MTT
+ * @v len              Length of memory to map
+ * @v mtt              MTT descriptor to fill in
+ * @ret rc             Return status code
+ */
+static int hermon_alloc_mtt ( struct hermon *hermon,
+                             const void *memory, size_t len,
+                             struct hermon_mtt *mtt ) {
+       struct hermonprm_write_mtt write_mtt;
+       physaddr_t start;
+       unsigned int page_offset;
+       unsigned int num_pages;
+       int mtt_offset;
+       unsigned int mtt_base_addr;
+       unsigned int i;
+       int rc;
+
+       /* Find available MTT entries.  The region is first aligned
+        * down to a page boundary; the offset within the first page is
+        * recorded so that callers can program it into their queue
+        * context, and the length is padded accordingly.
+        */
+       start = virt_to_phys ( memory );
+       page_offset = ( start & ( HERMON_PAGE_SIZE - 1 ) );
+       start -= page_offset;
+       len += page_offset;
+       num_pages = ( ( len + HERMON_PAGE_SIZE - 1 ) / HERMON_PAGE_SIZE );
+       mtt_offset = hermon_bitmask_alloc ( hermon->mtt_inuse, HERMON_MAX_MTTS,
+                                           num_pages );
+       if ( mtt_offset < 0 ) {
+               DBGC ( hermon, "Hermon %p could not allocate %d MTT entries\n",
+                      hermon, num_pages );
+               rc = mtt_offset;
+               goto err_mtt_offset;
+       }
+       /* Entries below cap.reserved_mtts belong to firmware; our
+        * bitmask indexes start above them.
+        */
+       mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) *
+                         hermon->cap.mtt_entry_size );
+
+       /* Fill in MTT structure */
+       mtt->mtt_offset = mtt_offset;
+       mtt->num_pages = num_pages;
+       mtt->mtt_base_addr = mtt_base_addr;
+       mtt->page_offset = page_offset;
+
+       /* Construct and issue WRITE_MTT commands, one page per
+        * command.  ptag_l holds the page's physical address shifted
+        * right by 3 (its low bits are used as flags; "p" = present).
+        */
+       for ( i = 0 ; i < num_pages ; i++ ) {
+               memset ( &write_mtt, 0, sizeof ( write_mtt ) );
+               MLX_FILL_1 ( &write_mtt.mtt_base_addr, 1,
+                            value, mtt_base_addr );
+               MLX_FILL_2 ( &write_mtt.mtt, 1,
+                            p, 1,
+                            ptag_l, ( start >> 3 ) );
+               if ( ( rc = hermon_cmd_write_mtt ( hermon,
+                                                  &write_mtt ) ) != 0 ) {
+                       DBGC ( hermon, "Hermon %p could not write MTT at %x\n",
+                              hermon, mtt_base_addr );
+                       goto err_write_mtt;
+               }
+               start += HERMON_PAGE_SIZE;
+               mtt_base_addr += hermon->cap.mtt_entry_size;
+       }
+
+       return 0;
+
+ err_write_mtt:
+       hermon_bitmask_free ( hermon->mtt_inuse, mtt_offset, num_pages );
+ err_mtt_offset:
+       return rc;
+}
+
+/**
+ * Free MTT entries
+ *
+ * @v hermon           Hermon device
+ * @v mtt              MTT descriptor
+ */
+static void hermon_free_mtt ( struct hermon *hermon,
+                             struct hermon_mtt *mtt ) {
+       /* Release the run of MTT entries recorded by
+        * hermon_alloc_mtt(); no hardware command is needed.
+        */
+       hermon_bitmask_free ( hermon->mtt_inuse, mtt->mtt_offset,
+                             mtt->num_pages );
+}
+
+/***************************************************************************
+ *
+ * Completion queue operations
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Create completion queue
+ *
+ * @v ibdev            Infiniband device
+ * @v cq               Completion queue
+ * @ret rc             Return status code
+ */
+static int hermon_create_cq ( struct ib_device *ibdev,
+                             struct ib_completion_queue *cq ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermon_completion_queue *hermon_cq;
+       struct hermonprm_completion_queue_context cqctx;
+       int cqn_offset;
+       unsigned int i;
+       int rc;
+
+       /* Find a free completion queue number (offset from the
+        * firmware-reserved CQ range).
+        */
+       cqn_offset = hermon_bitmask_alloc ( hermon->cq_inuse,
+                                           HERMON_MAX_CQS, 1 );
+       if ( cqn_offset < 0 ) {
+               DBGC ( hermon, "Hermon %p out of completion queues\n",
+                      hermon );
+               rc = cqn_offset;
+               goto err_cqn_offset;
+       }
+       cq->cqn = ( hermon->cap.reserved_cqs + cqn_offset );
+
+       /* Allocate control structures */
+       hermon_cq = zalloc ( sizeof ( *hermon_cq ) );
+       if ( ! hermon_cq ) {
+               rc = -ENOMEM;
+               goto err_hermon_cq;
+       }
+
+       /* Allocate completion queue itself (DMA-coherent, aligned to
+        * the CQE size).
+        */
+       hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
+       hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
+                                     sizeof ( hermon_cq->cqe[0] ) );
+       if ( ! hermon_cq->cqe ) {
+               rc = -ENOMEM;
+               goto err_cqe;
+       }
+       memset ( hermon_cq->cqe, 0, hermon_cq->cqe_size );
+       /* Set the owner bit on every CQE before handing the ring to
+        * hardware; the barrier ensures the writes complete first.
+        */
+       for ( i = 0 ; i < cq->num_cqes ; i++ ) {
+               MLX_FILL_1 ( &hermon_cq->cqe[i].normal, 7, owner, 1 );
+       }
+       barrier();
+
+       /* Allocate MTT entries */
+       if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe,
+                                      hermon_cq->cqe_size,
+                                      &hermon_cq->mtt ) ) != 0 )
+               goto err_alloc_mtt;
+
+       /* Hand queue over to hardware.  Addresses are encoded
+        * right-shifted as the context fields hold only their
+        * significant upper bits.
+        */
+       memset ( &cqctx, 0, sizeof ( cqctx ) );
+       MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
+       MLX_FILL_1 ( &cqctx, 2,
+                    page_offset, ( hermon_cq->mtt.page_offset >> 5 ) );
+       MLX_FILL_2 ( &cqctx, 3,
+                    usr_page, HERMON_UAR_PAGE,
+                    log_cq_size, fls ( cq->num_cqes - 1 ) );
+       MLX_FILL_1 ( &cqctx, 7, mtt_base_addr_l,
+                    ( hermon_cq->mtt.mtt_base_addr >> 3 ) );
+       MLX_FILL_1 ( &cqctx, 15, db_record_addr_l,
+                    ( virt_to_phys ( &hermon_cq->doorbell ) >> 3 ) );
+       if ( ( rc = hermon_cmd_sw2hw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p SW2HW_CQ failed: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_sw2hw_cq;
+       }
+
+       DBGC ( hermon, "Hermon %p CQN %#lx ring at [%p,%p)\n",
+              hermon, cq->cqn, hermon_cq->cqe,
+              ( ( ( void * ) hermon_cq->cqe ) + hermon_cq->cqe_size ) );
+       cq->dev_priv = hermon_cq;
+       return 0;
+
+ err_sw2hw_cq:
+       hermon_free_mtt ( hermon, &hermon_cq->mtt );
+ err_alloc_mtt:
+       free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
+ err_cqe:
+       free ( hermon_cq );
+ err_hermon_cq:
+       hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
+ err_cqn_offset:
+       return rc;
+}
+
+/**
+ * Destroy completion queue
+ *
+ * @v ibdev            Infiniband device
+ * @v cq               Completion queue
+ */
+static void hermon_destroy_cq ( struct ib_device *ibdev,
+                               struct ib_completion_queue *cq ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermon_completion_queue *hermon_cq = cq->dev_priv;
+       struct hermonprm_completion_queue_context cqctx;
+       int cqn_offset;
+       int rc;
+
+       /* Take ownership back from hardware.  If HW2SW_CQ fails the
+        * device may still DMA into the ring, so freeing the memory
+        * would risk corruption; deliberately leak it instead.
+        */
+       if ( ( rc = hermon_cmd_hw2sw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p FATAL HW2SW_CQ failed on CQN %#lx: "
+                      "%s\n", hermon, cq->cqn, strerror ( rc ) );
+               /* Leak memory and return; at least we avoid corruption */
+               return;
+       }
+
+       /* Free MTT entries */
+       hermon_free_mtt ( hermon, &hermon_cq->mtt );
+
+       /* Free memory */
+       free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
+       free ( hermon_cq );
+
+       /* Mark queue number as free (reverse of the offset mapping in
+        * hermon_create_cq()).
+        */
+       cqn_offset = ( cq->cqn - hermon->cap.reserved_cqs );
+       hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
+
+       cq->dev_priv = NULL;
+}
+
+/***************************************************************************
+ *
+ * Queue pair operations
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Create queue pair
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ * @ret rc             Return status code
+ */
+static int hermon_create_qp ( struct ib_device *ibdev,
+                             struct ib_queue_pair *qp ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermon_queue_pair *hermon_qp;
+       struct hermonprm_qp_ee_state_transitions qpctx;
+       int qpn_offset;
+       int rc;
+
+       /* Find a free queue pair number (offset from the
+        * firmware-reserved QP range, above HERMON_QPN_BASE).
+        */
+       qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
+                                           HERMON_MAX_QPS, 1 );
+       if ( qpn_offset < 0 ) {
+               DBGC ( hermon, "Hermon %p out of queue pairs\n", hermon );
+               rc = qpn_offset;
+               goto err_qpn_offset;
+       }
+       qp->qpn = ( HERMON_QPN_BASE + hermon->cap.reserved_qps +
+                   qpn_offset );
+
+       /* Allocate control structures */
+       hermon_qp = zalloc ( sizeof ( *hermon_qp ) );
+       if ( ! hermon_qp ) {
+               rc = -ENOMEM;
+               goto err_hermon_qp;
+       }
+
+       /* Allocate work queue buffer.  The send ring is oversized by
+        * one WQE plus 2048 bytes of headroom and rounded up to a
+        * power of two (log_sq_size below requires a power of two).
+        * Send and receive rings share a single DMA allocation, send
+        * ring first.
+        */
+       hermon_qp->send.num_wqes = ( qp->send.num_wqes /* headroom */ + 1 +
+                               ( 2048 / sizeof ( hermon_qp->send.wqe[0] ) ) );
+       hermon_qp->send.num_wqes =
+               ( 1 << fls ( hermon_qp->send.num_wqes - 1 ) ); /* round up */
+       hermon_qp->send.wqe_size = ( hermon_qp->send.num_wqes *
+                                    sizeof ( hermon_qp->send.wqe[0] ) );
+       hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
+                                    sizeof ( hermon_qp->recv.wqe[0] ) );
+       hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
+                               hermon_qp->recv.wqe_size );
+       hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
+                                     sizeof ( hermon_qp->send.wqe[0] ) );
+       if ( ! hermon_qp->wqe ) {
+               rc = -ENOMEM;
+               goto err_alloc_wqe;
+       }
+       /* Send WQEs are filled with 0xff (presumably to mark them
+        * invalid/unowned until posted — TODO confirm against PRM);
+        * receive WQEs are zeroed.
+        */
+       hermon_qp->send.wqe = hermon_qp->wqe;
+       memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
+       hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
+       memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );
+
+       /* Allocate MTT entries */
+       if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
+                                      hermon_qp->wqe_size,
+                                      &hermon_qp->mtt ) ) != 0 ) {
+               goto err_alloc_mtt;
+       }
+
+       /* Hand queue over to hardware: RST->INIT transition.  The
+        * log_*_stride fields encode log2(WQE size) minus 4.
+        */
+       memset ( &qpctx, 0, sizeof ( qpctx ) );
+       MLX_FILL_2 ( &qpctx, 2,
+                    qpc_eec_data.pm_state, 0x03 /* Always 0x03 for UD */,
+                    qpc_eec_data.st, HERMON_ST_UD );
+       MLX_FILL_1 ( &qpctx, 3, qpc_eec_data.pd, HERMON_GLOBAL_PD );
+       MLX_FILL_4 ( &qpctx, 4,
+                    qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
+                    qpc_eec_data.log_rq_stride,
+                    ( fls ( sizeof ( hermon_qp->recv.wqe[0] ) - 1 ) - 4 ),
+                    qpc_eec_data.log_sq_size,
+                    fls ( hermon_qp->send.num_wqes - 1 ),
+                    qpc_eec_data.log_sq_stride,
+                    ( fls ( sizeof ( hermon_qp->send.wqe[0] ) - 1 ) - 4 ) );
+       MLX_FILL_1 ( &qpctx, 5,
+                    qpc_eec_data.usr_page, HERMON_UAR_PAGE );
+       MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
+       MLX_FILL_1 ( &qpctx, 38, qpc_eec_data.page_offset,
+                    ( hermon_qp->mtt.page_offset >> 6 ) );
+       MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
+       MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
+                    ( virt_to_phys ( &hermon_qp->recv.doorbell ) >> 2 ) );
+       MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
+       MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
+                    ( hermon_qp->mtt.mtt_base_addr >> 3 ) );
+       if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
+                                            &qpctx ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p RST2INIT_QP failed: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_rst2init_qp;
+       }
+
+       /* INIT->RTR transition: MTU, max message size and the port
+        * selection (encoded into sched_queue bits 6+).
+        */
+       memset ( &qpctx, 0, sizeof ( qpctx ) );
+       MLX_FILL_2 ( &qpctx, 4,
+                    qpc_eec_data.mtu, HERMON_MTU_2048,
+                    qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */ );
+       MLX_FILL_1 ( &qpctx, 16,
+                    qpc_eec_data.primary_address_path.sched_queue,
+                    ( 0x83 /* default policy */ |
+                      ( ( PXE_IB_PORT - 1 ) << 6 ) ) );
+       if ( ( rc = hermon_cmd_init2rtr_qp ( hermon, qp->qpn,
+                                            &qpctx ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p INIT2RTR_QP failed: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_init2rtr_qp;
+       }
+       /* RTR->RTS transition; no context fields need to be set */
+       memset ( &qpctx, 0, sizeof ( qpctx ) );
+       if ( ( rc = hermon_cmd_rtr2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
+               DBGC ( hermon, "Hermon %p RTR2RTS_QP failed: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_rtr2rts_qp;
+       }
+
+       DBGC ( hermon, "Hermon %p QPN %#lx send ring at [%p,%p)\n",
+              hermon, qp->qpn, hermon_qp->send.wqe,
+              ( ((void *)hermon_qp->send.wqe ) + hermon_qp->send.wqe_size ) );
+       DBGC ( hermon, "Hermon %p QPN %#lx receive ring at [%p,%p)\n",
+              hermon, qp->qpn, hermon_qp->recv.wqe,
+              ( ((void *)hermon_qp->recv.wqe ) + hermon_qp->recv.wqe_size ) );
+       qp->dev_priv = hermon_qp;
+       return 0;
+
+ err_rtr2rts_qp:
+ err_init2rtr_qp:
+       /* Return the QP to RESET so the hardware releases it */
+       hermon_cmd_2rst_qp ( hermon, qp->qpn );
+ err_rst2init_qp:
+       hermon_free_mtt ( hermon, &hermon_qp->mtt );
+ err_alloc_mtt:
+       free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
+ err_alloc_wqe:
+       free ( hermon_qp );
+ err_hermon_qp:
+       hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
+ err_qpn_offset:
+       return rc;
+}
+
+/**
+ * Destroy queue pair
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ *
+ * Reverses hermon_create_qp(): returns the QP to the reset state,
+ * releases its MTT entries and WQE buffer, and frees its queue pair
+ * number back to the allocation bitmask.
+ */
+static void hermon_destroy_qp ( struct ib_device *ibdev,
+                               struct ib_queue_pair *qp ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermon_queue_pair *hermon_qp = qp->dev_priv;
+       int qpn_offset;
+       int rc;
+
+       /* Take ownership back from hardware; until 2RST_QP succeeds
+        * the device may still DMA into the WQE buffer, so we must not
+        * free anything on failure.
+        */
+       if ( ( rc = hermon_cmd_2rst_qp ( hermon, qp->qpn ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p FATAL 2RST_QP failed on QPN %#lx: "
+                      "%s\n", hermon, qp->qpn, strerror ( rc ) );
+               /* Leak memory and return; at least we avoid corruption */
+               return;
+       }
+
+       /* Free MTT entries */
+       hermon_free_mtt ( hermon, &hermon_qp->mtt );
+
+       /* Free memory */
+       free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
+       free ( hermon_qp );
+
+       /* Mark queue number as free; QPNs are handed out as an offset
+        * from ( HERMON_QPN_BASE + reserved_qps ), so invert that
+        * mapping to recover the bitmask index.
+        */
+       qpn_offset = ( qp->qpn - HERMON_QPN_BASE -
+                      hermon->cap.reserved_qps );
+       hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
+
+       qp->dev_priv = NULL;
+}
+
+/***************************************************************************
+ *
+ * Work request operations
+ *
+ ***************************************************************************
+ */
+
+/** GID used for GID-less send work queue entries
+ *
+ * The UD send WQE format always contains a GID field; when the
+ * address vector carries no GID ( av->gid_present == 0 ) this
+ * all-zero GID is copied in instead (see hermon_post_send()).
+ */
+static const struct ib_gid hermon_no_gid = {
+       { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
+};
+
+/**
+ * Post send work queue entry
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ * @v av               Address vector
+ * @v iobuf            I/O buffer
+ * @ret rc             Return status code
+ *
+ * Builds a UD send WQE in place on the send ring, transfers ownership
+ * of the entry to the hardware, and rings the send doorbell.
+ */
+static int hermon_post_send ( struct ib_device *ibdev,
+                             struct ib_queue_pair *qp,
+                             struct ib_address_vector *av,
+                             struct io_buffer *iobuf ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermon_queue_pair *hermon_qp = qp->dev_priv;
+       struct ib_work_queue *wq = &qp->send;
+       struct hermon_send_work_queue *hermon_send_wq = &hermon_qp->send;
+       struct hermonprm_ud_send_wqe *wqe;
+       const struct ib_gid *gid;
+       union hermonprm_doorbell_register db_reg;
+       unsigned int wqe_idx_mask;
+
+       /* Allocate work queue entry; a non-NULL iobufs[] slot means
+        * the corresponding WQE has not yet completed, i.e. the ring
+        * is full.
+        */
+       wqe_idx_mask = ( wq->num_wqes - 1 );
+       if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
+               DBGC ( hermon, "Hermon %p send queue full", hermon );
+               return -ENOBUFS;
+       }
+       wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
+       wqe = &hermon_send_wq->wqe[ wq->next_idx &
+                                   ( hermon_send_wq->num_wqes - 1 ) ].ud;
+
+       /* Construct work queue entry.  The first dword contains the
+        * ownership bit, which must be written last; skip it here.
+        */
+       memset ( ( ( ( void * ) wqe ) + 4 /* avoid ctrl.owner */ ), 0,
+                  ( sizeof ( *wqe ) - 4 ) );
+       MLX_FILL_1 ( &wqe->ctrl, 1, ds, ( sizeof ( *wqe ) / 16 ) );
+       MLX_FILL_1 ( &wqe->ctrl, 2, c, 0x03 /* generate completion */ );
+       MLX_FILL_2 ( &wqe->ud, 0,
+                    ud_address_vector.pd, HERMON_GLOBAL_PD,
+                    ud_address_vector.port_number, PXE_IB_PORT );
+       MLX_FILL_2 ( &wqe->ud, 1,
+                    ud_address_vector.rlid, av->dlid,
+                    ud_address_vector.g, av->gid_present );
+       /* Map the IB static rate; out-of-range rates fall back to 8.
+        * NOTE(review): assumed to match the PRM max_stat_rate
+        * encoding — confirm against the ConnectX PRM.
+        */
+       MLX_FILL_1 ( &wqe->ud, 2,
+                    ud_address_vector.max_stat_rate,
+                    ( ( ( av->rate < 2 ) || ( av->rate > 10 ) ) ?
+                      8 : ( av->rate + 5 ) ) );
+       MLX_FILL_1 ( &wqe->ud, 3, ud_address_vector.sl, av->sl );
+       gid = ( av->gid_present ? &av->gid : &hermon_no_gid );
+       memcpy ( &wqe->ud.u.dwords[4], gid, sizeof ( *gid ) );
+       MLX_FILL_1 ( &wqe->ud, 8, destination_qp, av->dest_qp );
+       MLX_FILL_1 ( &wqe->ud, 9, q_key, av->qkey );
+       MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_len ( iobuf ) );
+       MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->reserved_lkey );
+       MLX_FILL_1 ( &wqe->data[0], 3,
+                    local_address_l, virt_to_bus ( iobuf->data ) );
+       /* Ensure the body of the WQE is visible before the ownership
+        * bit is flipped.
+        */
+       barrier();
+       /* Ownership alternates each pass around the ring, tracked via
+        * the ( next_idx & num_wqes ) parity bit.
+        */
+       MLX_FILL_2 ( &wqe->ctrl, 0,
+                    opcode, HERMON_OPCODE_SEND,
+                    owner,
+                    ( ( wq->next_idx & hermon_send_wq->num_wqes ) ? 1 : 0 ) );
+       DBGCP ( hermon, "Hermon %p posting send WQE:\n", hermon );
+       DBGCP_HD ( hermon, wqe, sizeof ( *wqe ) );
+       barrier();
+
+       /* Ring doorbell register */
+       MLX_FILL_1 ( &db_reg.send, 0, qn, qp->qpn );
+       DBGCP ( hermon, "Ringing doorbell %08lx with %08lx\n",
+               virt_to_phys ( hermon->uar + HERMON_DB_POST_SND_OFFSET ),
+               db_reg.dword[0] );
+       writel ( db_reg.dword[0], ( hermon->uar + HERMON_DB_POST_SND_OFFSET ));
+
+       /* Update work queue's index */
+       wq->next_idx++;
+
+       return 0;
+}
+
+/**
+ * Post receive work queue entry
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ * @v iobuf            I/O buffer
+ * @ret rc             Return status code
+ *
+ * Fills in a receive WQE pointing at @c iobuf and advances the
+ * in-memory receive doorbell record so that the hardware sees the
+ * new entry.
+ */
+static int hermon_post_recv ( struct ib_device *ibdev,
+                             struct ib_queue_pair *qp,
+                             struct io_buffer *iobuf ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermon_queue_pair *hermon_qp = qp->dev_priv;
+       struct ib_work_queue *wq = &qp->recv;
+       struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
+       struct hermonprm_recv_wqe *wqe;
+       unsigned int wqe_idx_mask;
+
+       /* Allocate work queue entry; a non-NULL iobufs[] slot means
+        * the ring is full.
+        */
+       wqe_idx_mask = ( wq->num_wqes - 1 );
+       if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
+               DBGC ( hermon, "Hermon %p receive queue full", hermon );
+               return -ENOBUFS;
+       }
+       wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
+       wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;
+
+       /* Construct work queue entry: one scatter entry covering the
+        * whole tailroom of the I/O buffer.
+        */
+       MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
+       MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->reserved_lkey );
+       MLX_FILL_1 ( &wqe->data[0], 3,
+                    local_address_l, virt_to_bus ( iobuf->data ) );
+
+       /* Update work queue's index */
+       wq->next_idx++;
+
+       /* Update doorbell record; the barrier ensures the WQE is
+        * visible in memory before the counter advances.  The counter
+        * field is 16 bits wide.
+        */
+       barrier();
+       MLX_FILL_1 ( &hermon_recv_wq->doorbell, 0, receive_wqe_counter,
+                    ( wq->next_idx & 0xffff ) );
+
+       return 0;
+}
+
+/**
+ * Handle completion
+ *
+ * @v ibdev            Infiniband device
+ * @v cq               Completion queue
+ * @v cqe              Hardware completion queue entry
+ * @v complete_send    Send completion handler
+ * @v complete_recv    Receive completion handler
+ * @ret rc             Return status code
+ *
+ * Parses one hardware CQE, locates the owning work queue and I/O
+ * buffer, and hands the completion to the appropriate caller-supplied
+ * handler.
+ */
+static int hermon_complete ( struct ib_device *ibdev,
+                            struct ib_completion_queue *cq,
+                            union hermonprm_completion_entry *cqe,
+                            ib_completer_t complete_send,
+                            ib_completer_t complete_recv ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct ib_completion completion;
+       struct ib_work_queue *wq;
+       struct ib_queue_pair *qp;
+       struct hermon_queue_pair *hermon_qp;
+       struct io_buffer *iobuf;
+       ib_completer_t complete;
+       unsigned int opcode;
+       unsigned long qpn;
+       int is_send;
+       unsigned int wqe_idx;
+       int rc = 0;
+
+       /* Parse completion */
+       memset ( &completion, 0, sizeof ( completion ) );
+       qpn = MLX_GET ( &cqe->normal, qpn );
+       is_send = MLX_GET ( &cqe->normal, s_r );
+       opcode = MLX_GET ( &cqe->normal, opcode );
+       if ( opcode >= HERMON_OPCODE_RECV_ERROR ) {
+               /* "s" field is not valid for error opcodes */
+               is_send = ( opcode == HERMON_OPCODE_SEND_ERROR );
+               completion.syndrome = MLX_GET ( &cqe->error, syndrome );
+               DBGC ( hermon, "Hermon %p CQN %lx syndrome %x vendor %lx\n",
+                      hermon, cq->cqn, completion.syndrome,
+                      MLX_GET ( &cqe->error, vendor_error_syndrome ) );
+               rc = -EIO;
+               /* Don't return immediately; propagate error to completer */
+       }
+
+       /* Identify work queue */
+       wq = ib_find_wq ( cq, qpn, is_send );
+       if ( ! wq ) {
+               DBGC ( hermon, "Hermon %p CQN %lx unknown %s QPN %lx\n",
+                      hermon, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
+               return -EIO;
+       }
+       qp = wq->qp;
+       hermon_qp = qp->dev_priv;
+
+       /* Identify I/O buffer; wqe_counter is a free-running counter,
+        * so mask it down to a ring index.
+        */
+       wqe_idx = ( MLX_GET ( &cqe->normal, wqe_counter ) &
+                   ( wq->num_wqes - 1 ) );
+       iobuf = wq->iobufs[wqe_idx];
+       if ( ! iobuf ) {
+               DBGC ( hermon, "Hermon %p CQN %lx QPN %lx empty WQE %x\n",
+                      hermon, cq->cqn, qpn, wqe_idx );
+               return -EIO;
+       }
+       wq->iobufs[wqe_idx] = NULL;
+
+       /* Fill in length for received packets */
+       if ( ! is_send ) {
+               completion.len = MLX_GET ( &cqe->normal, byte_cnt );
+               if ( completion.len > iob_tailroom ( iobuf ) ) {
+                       DBGC ( hermon, "Hermon %p CQN %lx QPN %lx IDX %x "
+                              "overlength received packet length %zd\n",
+                              hermon, cq->cqn, qpn, wqe_idx, completion.len );
+                       /* NOTE(review): iobuf was already removed from
+                        * the ring but is neither freed nor passed to
+                        * the completer here — looks like a leak on
+                        * this path; confirm intended behaviour.
+                        */
+                       return -EIO;
+               }
+       }
+
+       /* Pass off to caller's completion handler */
+       complete = ( is_send ? complete_send : complete_recv );
+       complete ( ibdev, qp, &completion, iobuf );
+
+       return rc;
+}
+
+/**
+ * Poll completion queue
+ *
+ * @v ibdev            Infiniband device
+ * @v cq               Completion queue
+ * @v complete_send    Send completion handler
+ * @v complete_recv    Receive completion handler
+ *
+ * Consumes all software-owned CQEs, dispatching each via
+ * hermon_complete(), and advances the consumer-index doorbell record.
+ */
+static void hermon_poll_cq ( struct ib_device *ibdev,
+                            struct ib_completion_queue *cq,
+                            ib_completer_t complete_send,
+                            ib_completer_t complete_recv ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermon_completion_queue *hermon_cq = cq->dev_priv;
+       union hermonprm_completion_entry *cqe;
+       unsigned int cqe_idx_mask;
+       int rc;
+
+       while ( 1 ) {
+               /* Look for completion entry.  CQE ownership alternates
+                * on each pass around the ring; the expected software
+                * owner value is the ( next_idx & num_cqes ) parity
+                * bit.
+                */
+               cqe_idx_mask = ( cq->num_cqes - 1 );
+               cqe = &hermon_cq->cqe[cq->next_idx & cqe_idx_mask];
+               if ( MLX_GET ( &cqe->normal, owner ) ^
+                    ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
+                       /* Entry still owned by hardware; end of poll */
+                       break;
+               }
+               DBGCP ( hermon, "Hermon %p completion:\n", hermon );
+               DBGCP_HD ( hermon, cqe, sizeof ( *cqe ) );
+
+               /* Handle completion */
+               if ( ( rc = hermon_complete ( ibdev, cq, cqe, complete_send,
+                                             complete_recv ) ) != 0 ) {
+                       DBGC ( hermon, "Hermon %p failed to complete: %s\n",
+                              hermon, strerror ( rc ) );
+                       DBGC_HD ( hermon, cqe, sizeof ( *cqe ) );
+               }
+
+               /* Update completion queue's index */
+               cq->next_idx++;
+
+               /* Update doorbell record, telling the hardware how far
+                * we have consumed so it may reuse entries.  The
+                * consumer index field is 24 bits wide.
+                */
+               MLX_FILL_1 ( &hermon_cq->doorbell, 0, update_ci,
+                            ( cq->next_idx & 0xffffffUL ) );
+       }
+}
+
+/***************************************************************************
+ *
+ * Multicast group operations
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Attach to multicast group
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ * @v gid              Multicast GID
+ * @ret rc             Return status code
+ *
+ * Hashes the GID into the multicast group (MCG) table and writes a
+ * single-member entry pointing at @c qp.
+ */
+static int hermon_mcast_attach ( struct ib_device *ibdev,
+                                struct ib_queue_pair *qp,
+                                struct ib_gid *gid ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermonprm_mgm_hash hash;
+       struct hermonprm_mcg_entry mcg;
+       unsigned int index;
+       int rc;
+
+       /* Generate hash table index */
+       if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+       index = MLX_GET ( &hash, hash );
+
+       /* Check for existing hash table entry */
+       if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
+                      hermon, index, strerror ( rc ) );
+               return rc;
+       }
+       if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
+               /* FIXME: this implementation allows only a single QP
+                * per multicast group, and doesn't handle hash
+                * collisions.  Sufficient for IPoIB but may need to
+                * be extended in future.
+                */
+               DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
+                      hermon, index );
+               return -EBUSY;
+       }
+
+       /* Update hash table entry: one member, our QPN, and the GID
+        * itself in the entry body.
+        */
+       MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
+       MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
+       memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
+       if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
+                      hermon, index, strerror ( rc ) );
+               return rc;
+       }
+
+       return 0;
+}
+
+/**
+ * Detach from multicast group
+ *
+ * @v ibdev            Infiniband device
+ * @v qp               Queue pair
+ * @v gid              Multicast GID
+ *
+ * Clears the hashed MCG entry unconditionally (without checking that
+ * it matches @c gid or @c qp); this relies on the single-QP-per-group
+ * assumption noted in hermon_mcast_attach().
+ */
+static void hermon_mcast_detach ( struct ib_device *ibdev,
+                                 struct ib_queue_pair *qp __unused,
+                                 struct ib_gid *gid ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       struct hermonprm_mgm_hash hash;
+       struct hermonprm_mcg_entry mcg;
+       unsigned int index;
+       int rc;
+
+       /* Generate hash table index */
+       if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
+                      hermon, strerror ( rc ) );
+               return;
+       }
+       index = MLX_GET ( &hash, hash );
+
+       /* Clear hash table entry */
+       memset ( &mcg, 0, sizeof ( mcg ) );
+       if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
+                      hermon, index, strerror ( rc ) );
+               return;
+       }
+}
+
+/** Hermon Infiniband operations
+ *
+ * Device-specific implementations of the generic ib_device_operations
+ * interface, registered with the Infiniband core.
+ */
+static struct ib_device_operations hermon_ib_operations = {
+       .create_cq      = hermon_create_cq,
+       .destroy_cq     = hermon_destroy_cq,
+       .create_qp      = hermon_create_qp,
+       .destroy_qp     = hermon_destroy_qp,
+       .post_send      = hermon_post_send,
+       .post_recv      = hermon_post_recv,
+       .poll_cq        = hermon_poll_cq,
+       .mcast_attach   = hermon_mcast_attach,
+       .mcast_detach   = hermon_mcast_detach,
+};
+
+/***************************************************************************
+ *
+ * MAD IFC operations
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Issue a management datagram via the MAD_IFC firmware command
+ *
+ * @v hermon           Hermon device
+ * @v mad              Management datagram (response written in place)
+ * @ret rc             Return status code
+ */
+static int hermon_mad_ifc ( struct hermon *hermon,
+                           union hermonprm_mad *mad ) {
+       struct ib_mad_hdr *hdr = &mad->mad.mad_hdr;
+       int rc;
+
+       /* Every MAD must carry the base management version */
+       hdr->base_version = IB_MGMT_BASE_VERSION;
+
+       rc = hermon_cmd_mad_ifc ( hermon, mad );
+       if ( rc != 0 ) {
+               DBGC ( hermon, "Hermon %p could not issue MAD IFC: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       /* The command can succeed while the MAD itself reports failure */
+       if ( hdr->status != 0 ) {
+               DBGC ( hermon, "Hermon %p MAD IFC status %04x\n",
+                      hermon, ntohs ( hdr->status ) );
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * Get port information
+ *
+ * @v hermon           Hermon device
+ * @v port_info                Port information to fill in
+ * @ret rc             Return status code
+ */
+static int hermon_get_port_info ( struct hermon *hermon,
+                                 struct ib_mad_port_info *port_info ) {
+       union hermonprm_mad mad;
+       int rc;
+
+       /* Build a subnet management PortInfo GET request */
+       memset ( &mad, 0, sizeof ( mad ) );
+       mad.mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+       mad.mad.mad_hdr.class_version = 1;
+       mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET;
+       mad.mad.mad_hdr.attr_id = htons ( IB_SMP_ATTR_PORT_INFO );
+       mad.mad.mad_hdr.attr_mod = htonl ( PXE_IB_PORT );
+
+       rc = hermon_mad_ifc ( hermon, &mad );
+       if ( rc != 0 ) {
+               DBGC ( hermon, "Hermon %p could not get port info: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       /* Copy out the response body */
+       memcpy ( port_info, &mad.mad.port_info, sizeof ( *port_info ) );
+       return 0;
+}
+
+/**
+ * Get GUID information
+ *
+ * @v hermon           Hermon device
+ * @v guid_info                GUID information to fill in
+ * @ret rc             Return status code
+ */
+static int hermon_get_guid_info ( struct hermon *hermon,
+                                 struct ib_mad_guid_info *guid_info ) {
+       union hermonprm_mad mad;
+       int rc;
+
+       /* Build a subnet management GuidInfo GET request */
+       memset ( &mad, 0, sizeof ( mad ) );
+       mad.mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+       mad.mad.mad_hdr.class_version = 1;
+       mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET;
+       mad.mad.mad_hdr.attr_id = htons ( IB_SMP_ATTR_GUID_INFO );
+
+       rc = hermon_mad_ifc ( hermon, &mad );
+       if ( rc != 0 ) {
+               DBGC ( hermon, "Hermon %p could not get GUID info: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       /* Copy out the response body */
+       memcpy ( guid_info, &mad.mad.guid_info, sizeof ( *guid_info ) );
+       return 0;
+}
+
+/**
+ * Get partition key table
+ *
+ * @v hermon           Hermon device
+ * @v pkey_table       Partition key table to fill in
+ * @ret rc             Return status code
+ */
+static int hermon_get_pkey_table ( struct hermon *hermon,
+                                  struct ib_mad_pkey_table *pkey_table ) {
+       union hermonprm_mad mad;
+       int rc;
+
+       /* Build a subnet management PKeyTable GET request */
+       memset ( &mad, 0, sizeof ( mad ) );
+       mad.mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+       mad.mad.mad_hdr.class_version = 1;
+       mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET;
+       mad.mad.mad_hdr.attr_id = htons ( IB_SMP_ATTR_PKEY_TABLE );
+
+       rc = hermon_mad_ifc ( hermon, &mad );
+       if ( rc != 0 ) {
+               DBGC ( hermon, "Hermon %p could not get pkey table: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       /* Copy out the response body */
+       memcpy ( pkey_table, &mad.mad.pkey_table, sizeof ( *pkey_table ) );
+       return 0;
+}
+
+/**
+ * Determine the port GID
+ *
+ * @v hermon           Hermon device
+ * @v port_gid         Port GID to fill in
+ * @ret rc             Return status code
+ *
+ * The port GID is the 8-byte subnet prefix (from PortInfo) followed
+ * by the 8-byte port GUID (from GuidInfo).
+ */
+static int hermon_get_port_gid ( struct hermon *hermon,
+                                struct ib_gid *port_gid ) {
+       union {
+               /* This union exists just to save stack space */
+               struct ib_mad_port_info port_info;
+               struct ib_mad_guid_info guid_info;
+       } buf;
+       int rc;
+
+       /* Port info gives us the first half of the port GID */
+       rc = hermon_get_port_info ( hermon, &buf.port_info );
+       if ( rc != 0 )
+               return rc;
+       memcpy ( &port_gid->u.bytes[0], buf.port_info.gid_prefix, 8 );
+
+       /* GUID info gives us the second half of the port GID */
+       rc = hermon_get_guid_info ( hermon, &buf.guid_info );
+       if ( rc != 0 )
+               return rc;
+       memcpy ( &port_gid->u.bytes[8], buf.guid_info.gid_local, 8 );
+
+       return 0;
+}
+
+/**
+ * Determine the subnet manager LID
+ *
+ * @v hermon           Hermon device
+ * @v sm_lid           Subnet manager LID to fill in
+ * @ret rc             Return status code
+ */
+static int hermon_get_sm_lid ( struct hermon *hermon,
+                              unsigned long *sm_lid ) {
+       struct ib_mad_port_info port_info;
+       int rc;
+
+       rc = hermon_get_port_info ( hermon, &port_info );
+       if ( rc != 0 )
+               return rc;
+
+       /* The LID arrives in network byte order */
+       *sm_lid = ntohs ( port_info.mastersm_lid );
+       return 0;
+}
+
+/**
+ * Determine the partition key
+ *
+ * @v hermon           Hermon device
+ * @v pkey             Partition key to fill in
+ * @ret rc             Return status code
+ */
+static int hermon_get_pkey ( struct hermon *hermon, unsigned int *pkey ) {
+       struct ib_mad_pkey_table pkey_table;
+       int rc;
+
+       rc = hermon_get_pkey_table ( hermon, &pkey_table );
+       if ( rc != 0 )
+               return rc;
+
+       /* Use the first entry of the table */
+       *pkey = ntohs ( pkey_table.pkey[0][0] );
+       return 0;
+}
+
+/**
+ * Wait for link up
+ *
+ * @v hermon           Hermon device
+ * @ret rc             Return status code
+ *
+ * This function shouldn't really exist.  Unfortunately, IB links take
+ * a long time to come up, and we can't get various key parameters
+ * e.g. our own IPoIB MAC address without information from the subnet
+ * manager).  We should eventually make link-up an asynchronous event.
+ */
+static int hermon_wait_for_link ( struct hermon *hermon ) {
+       struct ib_mad_port_info port_info;
+       unsigned int retries;
+       int rc;
+
+       printf ( "Waiting for Infiniband link-up..." );
+       for ( retries = 20 ; retries ; retries-- ) {
+               /* NOTE(review): a failed PortInfo query retries
+                * immediately without sleeping.
+                */
+               if ( ( rc = hermon_get_port_info ( hermon,
+                                                  &port_info ) ) != 0 )
+                       continue;
+               /* Low nibble of port_state__link_speed_supported is
+                * the port state; 4 == Active (IB spec
+                * PortInfo:PortState encoding).
+                */
+               if ( ( ( port_info.port_state__link_speed_supported ) & 0xf )
+                    == 4 ) {
+                       printf ( "ok\n" );
+                       return 0;
+               }
+               printf ( "." );
+               sleep ( 1 );
+       }
+       printf ( "failed\n" );
+       return -ENODEV;
+}
+
+/**
+ * Get MAD parameters
+ *
+ * @v ibdev            Infiniband device
+ * @ret rc             Return status code
+ *
+ * Populates the Infiniband device's subnet manager LID, port GID and
+ * partition key by querying the subnet management agent via MAD IFC.
+ */
+static int hermon_get_mad_params ( struct ib_device *ibdev ) {
+       struct hermon *hermon = ibdev->dev_priv;
+       int rc;
+
+       /* Get subnet manager LID */
+       if ( ( rc = hermon_get_sm_lid ( hermon, &ibdev->sm_lid ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not determine subnet manager "
+                      "LID: %s\n", hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       /* Get port GID */
+       if ( ( rc = hermon_get_port_gid ( hermon, &ibdev->port_gid ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not determine port GID: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       /* Get partition key */
+       if ( ( rc = hermon_get_pkey ( hermon, &ibdev->pkey ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not determine partition key: "
+                      "%s\n", hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       return 0;
+}
+
+/***************************************************************************
+ *
+ * Firmware control
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Start firmware running
+ *
+ * @v hermon           Hermon device
+ * @ret rc             Return status code
+ *
+ * Queries the firmware size, allocates and maps the firmware area,
+ * and issues RUN_FW.  On success, hermon->firmware_area remains
+ * allocated until hermon_stop_firmware().
+ */
+static int hermon_start_firmware ( struct hermon *hermon ) {
+       struct hermonprm_query_fw fw;
+       struct hermonprm_virtual_physical_mapping map_fa;
+       unsigned int fw_pages;
+       unsigned int log2_fw_pages;
+       size_t fw_size;
+       physaddr_t fw_base;
+       int rc;
+
+       /* Get firmware parameters */
+       if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_query_fw;
+       }
+       DBGC ( hermon, "Hermon %p firmware version %ld.%ld.%ld\n", hermon,
+              MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
+              MLX_GET ( &fw, fw_rev_subminor ) );
+       /* Round the page count up to a power of two so that a single
+        * log2size can describe the whole mapping in MAP_FA.
+        */
+       fw_pages = MLX_GET ( &fw, fw_pages );
+       log2_fw_pages = fls ( fw_pages - 1 );
+       fw_pages = ( 1 << log2_fw_pages );
+       DBGC ( hermon, "Hermon %p requires %d kB for firmware\n",
+              hermon, ( fw_pages * 4 ) );
+
+       /* Allocate firmware pages and map firmware area.
+        * NOTE(review): masking the physical address down to a
+        * fw_size boundary assumes umalloc() returns a suitably
+        * aligned region; otherwise fw_base could point below the
+        * allocation — confirm umalloc's alignment guarantee.
+        */
+       fw_size = ( fw_pages * HERMON_PAGE_SIZE );
+       hermon->firmware_area = umalloc ( fw_size );
+       if ( ! hermon->firmware_area ) {
+               rc = -ENOMEM;
+               goto err_alloc_fa;
+       }
+       fw_base = ( user_to_phys ( hermon->firmware_area, fw_size ) &
+                   ~( fw_size - 1 ) );
+       DBGC ( hermon, "Hermon %p firmware area at physical [%lx,%lx)\n",
+              hermon, fw_base, ( fw_base + fw_size ) );
+       memset ( &map_fa, 0, sizeof ( map_fa ) );
+       MLX_FILL_2 ( &map_fa, 3,
+                    log2size, log2_fw_pages,
+                    pa_l, ( fw_base >> 12 ) );
+       if ( ( rc = hermon_cmd_map_fa ( hermon, &map_fa ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_map_fa;
+       }
+
+       /* Start firmware */
+       if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_run_fw;
+       }
+
+       DBGC ( hermon, "Hermon %p firmware started\n", hermon );
+       return 0;
+
+ err_run_fw:
+       hermon_cmd_unmap_fa ( hermon );
+ err_map_fa:
+       ufree ( hermon->firmware_area );
+       hermon->firmware_area = UNULL;
+ err_alloc_fa:
+ err_query_fw:
+       return rc;
+}
+
+/**
+ * Stop firmware running
+ *
+ * @v hermon           Hermon device
+ *
+ * Unmaps the firmware area from the device and frees it.  The memory
+ * is deliberately leaked if UNMAP_FA fails, since the device might
+ * still DMA into it.
+ */
+static void hermon_stop_firmware ( struct hermon *hermon ) {
+       int rc;
+
+       if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
+                      hermon, strerror ( rc ) );
+               /* Leak memory and return; at least we avoid corruption */
+               return;
+       }
+       ufree ( hermon->firmware_area );
+       hermon->firmware_area = UNULL;
+}
+
+/***************************************************************************
+ *
+ * Infinihost Context Memory management
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Get device limits
+ *
+ * @v hermon           Hermon device
+ * @ret rc             Return status code
+ *
+ * Issues QUERY_DEV_CAP and caches the context entry sizes and
+ * reserved-resource counts needed for ICM layout and resource
+ * allocation.  Counts reported as log2 values are expanded here.
+ */
+static int hermon_get_cap ( struct hermon *hermon ) {
+       struct hermonprm_query_dev_cap dev_cap;
+       int rc;
+
+       if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
+       hermon->cap.reserved_qps =
+               ( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
+       hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
+       hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
+       hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
+       hermon->cap.reserved_srqs =
+               ( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
+       hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
+       hermon->cap.reserved_cqs =
+               ( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
+       hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
+       hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
+       hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
+       hermon->cap.reserved_mtts =
+               ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
+       hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
+       hermon->cap.reserved_mrws =
+               ( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
+       hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
+       hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );
+
+       return 0;
+}
+
+/**
+ * Get ICM usage
+ *
+ * @v log_num_entries  Log2 of the number of entries
+ * @v entry_size       Entry size
+ * @ret usage          Usage size in ICM
+ *
+ * The result is rounded up to a whole number of ICM pages.
+ */
+static size_t icm_usage ( unsigned int log_num_entries, size_t entry_size ) {
+       size_t len;
+
+       /* Raw table size */
+       len = ( entry_size * ( 1 << log_num_entries ) );
+
+       /* Round up to an ICM page boundary */
+       len += ( HERMON_PAGE_SIZE - 1 );
+       len &= ~( HERMON_PAGE_SIZE - 1 );
+
+       return len;
+}
+
+/**
+ * Allocate ICM
+ *
+ * @v hermon           Hermon device
+ * @v init_hca         INIT_HCA structure to fill in
+ * @ret rc             Return status code
+ */
+static int hermon_alloc_icm ( struct hermon *hermon,
+                             struct hermonprm_init_hca *init_hca ) {
+       struct hermonprm_scalar_parameter icm_size;
+       struct hermonprm_scalar_parameter icm_aux_size;
+       struct hermonprm_virtual_physical_mapping map_icm_aux;
+       struct hermonprm_virtual_physical_mapping map_icm;
+       uint64_t icm_offset = 0;
+       unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
+       unsigned int log_num_mtts, log_num_mpts;
+       size_t cmpt_max_len;
+       size_t qp_cmpt_len, srq_cmpt_len, cq_cmpt_len, eq_cmpt_len;
+       size_t icm_len, icm_aux_len;
+       physaddr_t icm_phys;
+       int i;
+       int rc;
+
+       /*
+        * Start by carving up the ICM virtual address space
+        *
+        */
+
+       /* Calculate number of each object type within ICM */
+       log_num_qps = fls ( hermon->cap.reserved_qps + HERMON_MAX_QPS - 1 );
+       log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
+       log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
+       log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
+       log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );
+
+       /* ICM starts with the cMPT tables, which are sparse: each
+        * table reserves the full cmpt_max_len of ICM virtual address
+        * space (icm_offset advances by cmpt_max_len), but only the
+        * in-use prefix (*_cmpt_len) is recorded in icm_map and later
+        * backed with physical memory.
+        */
+       cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
+                        ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
+       qp_cmpt_len = icm_usage ( log_num_qps, hermon->cap.cmpt_entry_size );
+       hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
+       hermon->icm_map[HERMON_ICM_QP_CMPT].len = qp_cmpt_len;
+       icm_offset += cmpt_max_len;
+       srq_cmpt_len = icm_usage ( log_num_srqs, hermon->cap.cmpt_entry_size );
+       hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
+       hermon->icm_map[HERMON_ICM_SRQ_CMPT].len = srq_cmpt_len;
+       icm_offset += cmpt_max_len;
+       cq_cmpt_len = icm_usage ( log_num_cqs, hermon->cap.cmpt_entry_size );
+       hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
+       hermon->icm_map[HERMON_ICM_CQ_CMPT].len = cq_cmpt_len;
+       icm_offset += cmpt_max_len;
+       eq_cmpt_len = icm_usage ( log_num_eqs, hermon->cap.cmpt_entry_size );
+       hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
+       hermon->icm_map[HERMON_ICM_EQ_CMPT].len = eq_cmpt_len;
+       icm_offset += cmpt_max_len;
+
+       hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;
+
+       /* Queue pair contexts */
+       /* NOTE(review): the low dword is written as ( icm_offset >> 5 )
+        * with the log entry count packed into the low bits; presumably
+        * the hardware reconstructs the base address from the aligned
+        * upper bits.  Same pattern below for SRQC/CQC/EQC.  Confirm
+        * against the PRM.
+        */
+       MLX_FILL_1 ( init_hca, 12,
+                    qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
+                    ( icm_offset >> 32 ) );
+       MLX_FILL_2 ( init_hca, 13,
+                    qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
+                    ( icm_offset >> 5 ),
+                    qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
+                    log_num_qps );
+       DBGC ( hermon, "Hermon %p ICM QPC base = %llx\n", hermon, icm_offset );
+       icm_offset += icm_usage ( log_num_qps, hermon->cap.qpc_entry_size );
+
+       /* Extended alternate path contexts */
+       MLX_FILL_1 ( init_hca, 24,
+                    qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
+                    ( icm_offset >> 32 ) );
+       MLX_FILL_1 ( init_hca, 25,
+                    qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
+                    icm_offset );
+       DBGC ( hermon, "Hermon %p ICM ALTC base = %llx\n", hermon, icm_offset);
+       icm_offset += icm_usage ( log_num_qps,
+                                 hermon->cap.altc_entry_size );
+
+       /* Extended auxiliary contexts */
+       MLX_FILL_1 ( init_hca, 28,
+                    qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
+                    ( icm_offset >> 32 ) );
+       MLX_FILL_1 ( init_hca, 29,
+                    qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
+                    icm_offset );
+       DBGC ( hermon, "Hermon %p ICM AUXC base = %llx\n", hermon, icm_offset);
+       icm_offset += icm_usage ( log_num_qps,
+                                 hermon->cap.auxc_entry_size );
+
+       /* Shared receive queue contexts */
+       MLX_FILL_1 ( init_hca, 18,
+                    qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
+                    ( icm_offset >> 32 ) );
+       MLX_FILL_2 ( init_hca, 19,
+                    qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
+                    ( icm_offset >> 5 ),
+                    qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
+                    log_num_srqs );
+       DBGC ( hermon, "Hermon %p ICM SRQC base = %llx\n", hermon, icm_offset);
+       icm_offset += icm_usage ( log_num_srqs,
+                                 hermon->cap.srqc_entry_size );
+
+       /* Completion queue contexts */
+       MLX_FILL_1 ( init_hca, 20,
+                    qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
+                    ( icm_offset >> 32 ) );
+       MLX_FILL_2 ( init_hca, 21,
+                    qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
+                    ( icm_offset >> 5 ),
+                    qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
+                    log_num_cqs );
+       DBGC ( hermon, "Hermon %p ICM CQC base = %llx\n", hermon, icm_offset );
+       icm_offset += icm_usage ( log_num_cqs, hermon->cap.cqc_entry_size );
+
+       /* Event queue contexts */
+       MLX_FILL_1 ( init_hca, 32,
+                    qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
+                    ( icm_offset >> 32 ) );
+       MLX_FILL_2 ( init_hca, 33,
+                    qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
+                    ( icm_offset >> 5 ),
+                    qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
+                    log_num_eqs );
+       DBGC ( hermon, "Hermon %p ICM EQC base = %llx\n", hermon, icm_offset );
+       icm_offset += icm_usage ( log_num_eqs, hermon->cap.eqc_entry_size );
+
+       /* Memory translation table */
+       MLX_FILL_1 ( init_hca, 64,
+                    tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
+       MLX_FILL_1 ( init_hca, 65,
+                    tpt_parameters.mtt_base_addr_l, icm_offset );
+       DBGC ( hermon, "Hermon %p ICM MTT base = %llx\n", hermon, icm_offset );
+       icm_offset += icm_usage ( log_num_mtts,
+                                 hermon->cap.mtt_entry_size );
+
+       /* Memory protection table */
+       /* One MPT is needed beyond the reserved set, since entry
+        * number cap.reserved_mrws is used for our own unrestricted
+        * key (see hermon_setup_mpt()); hence the "+ 1" inside the
+        * fls ( x - 1 ) rounding idiom.
+        */
+       log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
+       MLX_FILL_1 ( init_hca, 60,
+                    tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
+       MLX_FILL_1 ( init_hca, 61,
+                    tpt_parameters.dmpt_base_adr_l, icm_offset );
+       MLX_FILL_1 ( init_hca, 62,
+                    tpt_parameters.log_dmpt_sz, log_num_mpts );
+       DBGC ( hermon, "Hermon %p ICM DMPT base = %llx\n", hermon, icm_offset);
+       icm_offset += icm_usage ( log_num_mpts,
+                                 hermon->cap.dmpt_entry_size );
+
+       /* Multicast table */
+       MLX_FILL_1 ( init_hca, 48,
+                    multicast_parameters.mc_base_addr_h,
+                    ( icm_offset >> 32 ) );
+       MLX_FILL_1 ( init_hca, 49,
+                    multicast_parameters.mc_base_addr_l, icm_offset );
+       MLX_FILL_1 ( init_hca, 52,
+                    multicast_parameters.log_mc_table_entry_sz,
+                    fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
+       MLX_FILL_1 ( init_hca, 53,
+                    multicast_parameters.log_mc_table_hash_sz, 3 );
+       MLX_FILL_1 ( init_hca, 54,
+                    multicast_parameters.log_mc_table_sz, 3 );
+       DBGC ( hermon, "Hermon %p ICM MC base = %llx\n", hermon, icm_offset );
+       /* 8 == ( 1 << log_mc_table_sz ) entries, rounded up to a page */
+       icm_offset += ( ( 8 * sizeof ( struct hermonprm_mcg_entry ) +
+                         HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );
+
+       hermon->icm_map[HERMON_ICM_OTHER].len =
+               ( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );
+
+       /*
+        * Allocate and map physical memory for (portions of) ICM
+        *
+        * Map is:
+        *   ICM AUX area (aligned to its own size)
+        *   cMPT areas
+        *   Other areas
+        */
+
+       /* Calculate physical memory required for ICM */
+       icm_len = 0;
+       for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
+               icm_len += hermon->icm_map[i].len;
+       }
+
+       /* Get ICM auxiliary area size */
+       memset ( &icm_size, 0, sizeof ( icm_size ) );
+       MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
+       MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
+       if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
+                                             &icm_aux_size ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_set_icm_size;
+       }
+       icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );
+       /* Must round up to nearest power of two :( */
+       icm_aux_len = ( 1 << fls ( icm_aux_len - 1 ) );
+
+       /* Allocate ICM data and auxiliary area.  An extra icm_aux_len
+        * is allocated so that the AUX area can be aligned up to its
+        * own size below without running out of space.
+        */
+       DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
+              hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
+       hermon->icm = umalloc ( 2 * icm_aux_len + icm_len );
+       if ( ! hermon->icm ) {
+               rc = -ENOMEM;
+               goto err_alloc;
+       }
+       icm_phys = user_to_phys ( hermon->icm, 0 );
+
+       /* Map ICM auxiliary area, aligned up to its own (power-of-two)
+        * size */
+       icm_phys = ( ( icm_phys + icm_aux_len - 1 ) & ~( icm_aux_len - 1 ) );
+       memset ( &map_icm_aux, 0, sizeof ( map_icm_aux ) );
+       MLX_FILL_2 ( &map_icm_aux, 3,
+                    log2size, fls ( ( icm_aux_len / HERMON_PAGE_SIZE ) - 1 ),
+                    pa_l, ( icm_phys >> 12 ) );
+       DBGC ( hermon, "Hermon %p mapping ICM AUX (2^%d pages) => %08lx\n",
+              hermon, fls ( ( icm_aux_len / HERMON_PAGE_SIZE ) - 1 ),
+              icm_phys );
+       if ( ( rc = hermon_cmd_map_icm_aux ( hermon, &map_icm_aux ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_map_icm_aux;
+       }
+       icm_phys += icm_aux_len;
+
+       /* MAP ICM area */
+       for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
+               memset ( &map_icm, 0, sizeof ( map_icm ) );
+               MLX_FILL_1 ( &map_icm, 0,
+                            va_h, ( hermon->icm_map[i].offset >> 32 ) );
+               MLX_FILL_1 ( &map_icm, 1,
+                            va_l, ( hermon->icm_map[i].offset >> 12 ) );
+               MLX_FILL_2 ( &map_icm, 3,
+                            log2size,
+                            fls ( ( hermon->icm_map[i].len /
+                                    HERMON_PAGE_SIZE ) - 1 ),
+                            pa_l, ( icm_phys >> 12 ) );
+               DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx (2^%d pages) "
+                      "=> %08lx\n", hermon, hermon->icm_map[i].offset,
+                      hermon->icm_map[i].len,
+                      fls ( ( hermon->icm_map[i].len /
+                              HERMON_PAGE_SIZE ) - 1 ), icm_phys );
+               if ( ( rc = hermon_cmd_map_icm ( hermon, &map_icm ) ) != 0 ) {
+                       DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
+                              hermon, strerror ( rc ) );
+                       goto err_map_icm;
+               }
+               icm_phys += hermon->icm_map[i].len;
+       }
+
+       return 0;
+
+ err_map_icm:
+       assert ( i == 0 ); /* We don't handle partial failure at present */
+       hermon_cmd_unmap_icm_aux ( hermon );
+ err_map_icm_aux:
+       ufree ( hermon->icm );
+       hermon->icm = UNULL;
+ err_alloc:
+ err_set_icm_size:
+       return rc;
+}
+
+/**
+ * Free ICM
+ *
+ * @v hermon           Hermon device
+ */
+static void hermon_free_icm ( struct hermon *hermon ) {
+       struct hermonprm_scalar_parameter unmap_icm;
+       struct hermon_icm_map *map;
+       int num_pages;
+       int i;
+
+       /* Unmap each ICM region, in reverse order of mapping */
+       for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
+               map = &hermon->icm_map[i];
+               num_pages =
+                       ( 1 << fls ( ( map->len / HERMON_PAGE_SIZE ) - 1 ) );
+               memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
+               MLX_FILL_1 ( &unmap_icm, 0, value_hi,
+                            ( map->offset >> 32 ) );
+               MLX_FILL_1 ( &unmap_icm, 1, value, map->offset );
+               hermon_cmd_unmap_icm ( hermon, num_pages, &unmap_icm );
+       }
+
+       /* Unmap the auxiliary area and release the backing memory */
+       hermon_cmd_unmap_icm_aux ( hermon );
+       ufree ( hermon->icm );
+       hermon->icm = UNULL;
+}
+
+/***************************************************************************
+ *
+ * Infiniband link-layer operations
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Initialise Infiniband link
+ *
+ * @v hermon           Hermon device
+ * @ret rc             Return status code
+ */
+static int hermon_init_port ( struct hermon *hermon ) {
+       struct hermonprm_init_port init_port;
+       int rc;
+
+       /* Request link parameters: width capability 3, one VL,
+        * 2048-byte MTU, one GID, 64 P_Keys.
+        */
+       memset ( &init_port, 0, sizeof ( init_port ) );
+       MLX_FILL_2 ( &init_port, 0,
+                    port_width_cap, 3,
+                    vl_cap, 1 );
+       MLX_FILL_2 ( &init_port, 1,
+                    mtu, HERMON_MTU_2048,
+                    max_gid, 1 );
+       MLX_FILL_1 ( &init_port, 2, max_pkey, 64 );
+       if ( ( rc = hermon_cmd_init_port ( hermon, PXE_IB_PORT,
+                                          &init_port ) ) != 0 ) {
+               /* Fix spelling of error message ("intialise") */
+               DBGC ( hermon, "Hermon %p could not initialise port: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       return 0;
+}
+
+/**
+ * Close Infiniband link
+ *
+ * @v hermon           Hermon device
+ */
+static void hermon_close_port ( struct hermon *hermon ) {
+       int rc;
+
+       rc = hermon_cmd_close_port ( hermon, PXE_IB_PORT );
+       if ( rc != 0 ) {
+               /* Nothing we can do beyond reporting the failure */
+               DBGC ( hermon, "Hermon %p could not close port: %s\n",
+                      hermon, strerror ( rc ) );
+       }
+}
+
+/***************************************************************************
+ *
+ * PCI interface
+ *
+ ***************************************************************************
+ */
+
+/**
+ * Set up memory protection table
+ *
+ * @v hermon           Hermon device
+ * @ret rc             Return status code
+ */
+static int hermon_setup_mpt ( struct hermon *hermon ) {
+       struct hermonprm_mpt mpt;
+       uint32_t key;
+       int rc;
+
+       /* Derive key: the first non-reserved MRW index, tagged with
+        * our key prefix.  reserved_lkey is the same key rotated left
+        * by eight bits (presumably the form in which the hardware
+        * expects it to be quoted -- confirm against PRM).
+        */
+       key = ( hermon->cap.reserved_mrws | HERMON_MKEY_PREFIX );
+       hermon->reserved_lkey = ( ( key << 8 ) | ( key >> 24 ) );
+
+       /* Initialise memory protection table entry with local read
+        * and local write access enabled.
+        */
+       memset ( &mpt, 0, sizeof ( mpt ) );
+       MLX_FILL_4 ( &mpt, 0,
+                    r_w, 1,
+                    pa, 1,
+                    lr, 1,
+                    lw, 1 );
+       MLX_FILL_1 ( &mpt, 2, mem_key, key );
+       MLX_FILL_1 ( &mpt, 3, pd, HERMON_GLOBAL_PD );
+       /* len64=1: presumably marks the region as covering the full
+        * 2^64-byte address space -- confirm against PRM */
+       MLX_FILL_1 ( &mpt, 10, len64, 1 );
+       if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
+                                          hermon->cap.reserved_mrws,
+                                          &mpt ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
+                      hermon, strerror ( rc ) );
+               return rc;
+       }
+
+       return 0;
+}
+
+/**
+ * Probe PCI device
+ *
+ * @v pci              PCI device
+ * @v id               PCI ID
+ * @ret rc             Return status code
+ */
+static int hermon_probe ( struct pci_device *pci,
+                         const struct pci_device_id *id __unused ) {
+       struct ib_device *ibdev;
+       struct hermon *hermon;
+       struct hermonprm_init_hca init_hca;
+       int rc;
+
+       /* Allocate Infiniband device (with the Hermon private data
+        * appended) */
+       ibdev = alloc_ibdev ( sizeof ( *hermon ) );
+       if ( ! ibdev ) {
+               rc = -ENOMEM;
+               goto err_ibdev;
+       }
+       ibdev->op = &hermon_ib_operations;
+       pci_set_drvdata ( pci, ibdev );
+       ibdev->dev = &pci->dev;
+       hermon = ibdev->dev_priv;
+       memset ( hermon, 0, sizeof ( *hermon ) );
+
+       /* Fix up PCI device */
+       adjust_pci_device ( pci );
+
+       /* Get PCI BARs */
+       /* NOTE(review): ioremap() return values are not checked for
+        * NULL before use.
+        */
+       hermon->config = ioremap ( pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR),
+                                  HERMON_PCI_CONFIG_BAR_SIZE );
+       hermon->uar = ioremap ( ( pci_bar_start ( pci, HERMON_PCI_UAR_BAR ) +
+                                 HERMON_UAR_PAGE * HERMON_PAGE_SIZE ),
+                               HERMON_PAGE_SIZE );
+
+       /* Allocate space for mailboxes */
+       hermon->mailbox_in = malloc_dma ( HERMON_MBOX_SIZE,
+                                         HERMON_MBOX_ALIGN );
+       if ( ! hermon->mailbox_in ) {
+               rc = -ENOMEM;
+               goto err_mailbox_in;
+       }
+       hermon->mailbox_out = malloc_dma ( HERMON_MBOX_SIZE,
+                                          HERMON_MBOX_ALIGN );
+       if ( ! hermon->mailbox_out ) {
+               rc = -ENOMEM;
+               goto err_mailbox_out;
+       }
+
+       /* Start firmware */
+       if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
+               goto err_start_firmware;
+
+       /* Get device limits */
+       if ( ( rc = hermon_get_cap ( hermon ) ) != 0 )
+               goto err_get_cap;
+
+       /* Allocate ICM; hermon_alloc_icm() also fills in the ICM
+        * layout portions of init_hca */
+       memset ( &init_hca, 0, sizeof ( init_hca ) );
+       if ( ( rc = hermon_alloc_icm ( hermon, &init_hca ) ) != 0 )
+               goto err_alloc_icm;
+
+       /* Initialise HCA */
+       MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
+       MLX_FILL_1 ( &init_hca, 5, udp, 1 );
+       MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
+       if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_init_hca;
+       }
+
+       /* Set up memory protection */
+       if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
+               goto err_setup_mpt;
+
+       /* Bring up IB layer */
+       if ( ( rc = hermon_init_port ( hermon ) ) != 0 )
+               goto err_init_port;
+
+       /* Wait for link */
+       if ( ( rc = hermon_wait_for_link ( hermon ) ) != 0 )
+               goto err_wait_for_link;
+
+       /* Get MAD parameters */
+       if ( ( rc = hermon_get_mad_params ( ibdev ) ) != 0 )
+               goto err_get_mad_params;
+
+       DBGC ( hermon, "Hermon %p port GID is %08lx:%08lx:%08lx:%08lx\n",
+              hermon, htonl ( ibdev->port_gid.u.dwords[0] ),
+              htonl ( ibdev->port_gid.u.dwords[1] ),
+              htonl ( ibdev->port_gid.u.dwords[2] ),
+              htonl ( ibdev->port_gid.u.dwords[3] ) );
+
+       /* Add IPoIB device */
+       if ( ( rc = ipoib_probe ( ibdev ) ) != 0 ) {
+               DBGC ( hermon, "Hermon %p could not add IPoIB device: %s\n",
+                      hermon, strerror ( rc ) );
+               goto err_ipoib_probe;
+       }
+
+       return 0;
+
+       /* Error path: unwind in strict reverse order of setup */
+ err_ipoib_probe:
+ err_get_mad_params:
+ err_wait_for_link:
+       hermon_close_port ( hermon );
+ err_init_port:
+ err_setup_mpt:
+       hermon_cmd_close_hca ( hermon );
+ err_init_hca:
+       hermon_free_icm ( hermon );
+ err_alloc_icm:
+ err_get_cap:
+       hermon_stop_firmware ( hermon );
+ err_start_firmware:
+       free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
+ err_mailbox_out:
+       free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+ err_mailbox_in:
+       free_ibdev ( ibdev );
+ err_ibdev:
+       return rc;
+}
+
+/**
+ * Remove PCI device
+ *
+ * @v pci              PCI device
+ */
+static void hermon_remove ( struct pci_device *pci ) {
+       struct ib_device *ibdev = pci_get_drvdata ( pci );
+       struct hermon *hermon = ibdev->dev_priv;
+
+       /* Tear down in reverse order of hermon_probe() */
+       ipoib_remove ( ibdev );
+       hermon_close_port ( hermon );
+       hermon_cmd_close_hca ( hermon );
+       hermon_free_icm ( hermon );
+       hermon_stop_firmware ( hermon );
+       /* NOTE(review): hermon_stop_firmware() is called twice here.
+        * This could be a deliberate hardware workaround, but it looks
+        * like an accidental duplicate; confirm before removing.
+        */
+       hermon_stop_firmware ( hermon );
+       free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
+       free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
+       free_ibdev ( ibdev );
+}
+
+/** PCI device IDs supported by this driver */
+static struct pci_device_id hermon_nics[] = {
+       PCI_ROM ( 0x15b3, 0x6340, "mt25408", "MT25408 HCA driver" ),
+       PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver" ),
+};
+
+/** Hermon PCI driver registration */
+struct pci_driver hermon_driver __pci_driver = {
+       .ids = hermon_nics,
+       .id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
+       .probe = hermon_probe,
+       .remove = hermon_remove,
+};
diff --git a/src/drivers/infiniband/hermon.h b/src/drivers/infiniband/hermon.h
new file mode 100644 (file)
index 0000000..1aee443
--- /dev/null
@@ -0,0 +1,459 @@
+#ifndef _HERMON_H
+#define _HERMON_H
+
+/** @file
+ *
+ * Mellanox Hermon Infiniband HCA driver
+ *
+ */
+
+#include <stdint.h>
+#include <gpxe/uaccess.h>
+#include "mlx_bitops.h"
+#include "MT25408_PRM.h"
+
+/*
+ * Hardware constants
+ *
+ */
+
+/* PCI BARs */
+#define HERMON_PCI_CONFIG_BAR          PCI_BASE_ADDRESS_0
+#define HERMON_PCI_CONFIG_BAR_SIZE     0x100000
+#define HERMON_PCI_UAR_BAR             PCI_BASE_ADDRESS_2
+
+/* Work queue entry and completion queue entry opcodes */
+#define HERMON_OPCODE_SEND             0x0a
+#define HERMON_OPCODE_RECV_ERROR       0xfe
+#define HERMON_OPCODE_SEND_ERROR       0xff
+
+/* HCA command register opcodes */
+#define HERMON_HCR_QUERY_DEV_CAP       0x0003
+#define HERMON_HCR_QUERY_FW            0x0004
+#define HERMON_HCR_INIT_HCA            0x0007
+#define HERMON_HCR_CLOSE_HCA           0x0008
+#define HERMON_HCR_INIT_PORT           0x0009
+#define HERMON_HCR_CLOSE_PORT          0x000a
+#define HERMON_HCR_SW2HW_MPT           0x000d
+#define HERMON_HCR_WRITE_MTT           0x0011
+#define HERMON_HCR_MAP_EQ              0x0012
+#define HERMON_HCR_SW2HW_EQ            0x0013
+#define HERMON_HCR_HW2SW_EQ            0x0014
+#define HERMON_HCR_SW2HW_CQ            0x0016
+#define HERMON_HCR_HW2SW_CQ            0x0017
+#define HERMON_HCR_RST2INIT_QP         0x0019
+#define HERMON_HCR_INIT2RTR_QP         0x001a
+#define HERMON_HCR_RTR2RTS_QP          0x001b
+#define HERMON_HCR_2RST_QP             0x0021
+#define HERMON_HCR_MAD_IFC             0x0024
+#define HERMON_HCR_READ_MCG            0x0025
+#define HERMON_HCR_WRITE_MCG           0x0026
+#define HERMON_HCR_MGID_HASH           0x0027
+#define HERMON_HCR_RUN_FW              0x0ff6
+#define HERMON_HCR_DISABLE_LAM         0x0ff7
+#define HERMON_HCR_ENABLE_LAM          0x0ff8
+#define HERMON_HCR_UNMAP_ICM           0x0ff9
+#define HERMON_HCR_MAP_ICM             0x0ffa
+#define HERMON_HCR_UNMAP_ICM_AUX       0x0ffb
+#define HERMON_HCR_MAP_ICM_AUX                 0x0ffc
+#define HERMON_HCR_SET_ICM_SIZE                0x0ffd
+#define HERMON_HCR_UNMAP_FA            0x0ffe
+#define HERMON_HCR_MAP_FA              0x0fff
+
+/* Service types */
+#define HERMON_ST_UD                   0x03
+
+/* MTUs */
+#define HERMON_MTU_2048                        0x04
+
+/* Sentinel L_Key value (never used as a real key) */
+#define HERMON_INVALID_LKEY            0x00000100UL
+
+/* Device page size in bytes */
+#define HERMON_PAGE_SIZE               4096
+
+/* Offset of the "post send" doorbell within the UAR page */
+#define HERMON_DB_POST_SND_OFFSET      0x14
+
+/*
+ * Datatypes that seem to be missing from the autogenerated documentation
+ *
+ */
+
+/* Multicast group hash value (presumably the output of the MGID_HASH
+ * command -- confirm against PRM) */
+struct hermonprm_mgm_hash_st {
+       pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+       pseudo_bit_t hash[0x00010];
+       pseudo_bit_t reserved1[0x00010];
+} __attribute__ (( packed ));
+
+/* A multicast group entry: header plus eight QP slots */
+struct hermonprm_mcg_entry_st {
+       struct hermonprm_mcg_hdr_st hdr;
+       struct hermonprm_mcg_qp_dw_st qp[8];
+} __attribute__ (( packed ));
+
+/* Completion queue doorbell record */
+struct hermonprm_cq_db_record_st {
+       pseudo_bit_t update_ci[0x00018];
+       pseudo_bit_t reserved0[0x00008];
+/* -------------- */
+       pseudo_bit_t arm_ci[0x00018];
+       pseudo_bit_t cmd[0x00003];
+       pseudo_bit_t reserved1[0x00001];
+       pseudo_bit_t cmd_sn[0x00002];
+       pseudo_bit_t reserved2[0x00002];
+} __attribute__ (( packed ));
+
+/* Send doorbell register */
+struct hermonprm_send_db_register_st {
+       pseudo_bit_t reserved[0x00008];
+       pseudo_bit_t qn[0x00018];
+} __attribute__ (( packed ));
+
+/* Generic 64-bit scalar parameter, as a high/low dword pair */
+struct hermonprm_scalar_parameter_st {
+       pseudo_bit_t value_hi[0x00020];
+/* -------------- */
+       pseudo_bit_t value[0x00020];
+} __attribute__ (( packed ));
+
+/*
+ * Wrapper structures for hardware datatypes
+ *
+ * MLX_DECLARE_STRUCT (from mlx_bitops.h) wraps each pseudo_bit_t
+ * structure in a form that can be accessed via the MLX_FILL_*() and
+ * MLX_GET() accessors.
+ */
+
+struct MLX_DECLARE_STRUCT ( hermonprm_completion_queue_context );
+struct MLX_DECLARE_STRUCT ( hermonprm_completion_queue_entry );
+struct MLX_DECLARE_STRUCT ( hermonprm_completion_with_error );
+struct MLX_DECLARE_STRUCT ( hermonprm_cq_db_record );
+struct MLX_DECLARE_STRUCT ( hermonprm_eqc );
+struct MLX_DECLARE_STRUCT ( hermonprm_hca_command_register );
+struct MLX_DECLARE_STRUCT ( hermonprm_init_hca );
+struct MLX_DECLARE_STRUCT ( hermonprm_init_port );
+struct MLX_DECLARE_STRUCT ( hermonprm_mad_ifc );
+struct MLX_DECLARE_STRUCT ( hermonprm_mcg_entry );
+struct MLX_DECLARE_STRUCT ( hermonprm_mgm_hash );
+struct MLX_DECLARE_STRUCT ( hermonprm_mpt );
+struct MLX_DECLARE_STRUCT ( hermonprm_mtt );
+struct MLX_DECLARE_STRUCT ( hermonprm_qp_db_record );
+struct MLX_DECLARE_STRUCT ( hermonprm_qp_ee_state_transitions );
+struct MLX_DECLARE_STRUCT ( hermonprm_query_dev_cap );
+struct MLX_DECLARE_STRUCT ( hermonprm_query_fw );
+struct MLX_DECLARE_STRUCT ( hermonprm_queue_pair_ee_context_entry );
+struct MLX_DECLARE_STRUCT ( hermonprm_scalar_parameter );
+struct MLX_DECLARE_STRUCT ( hermonprm_send_db_register );
+struct MLX_DECLARE_STRUCT ( hermonprm_ud_address_vector );
+struct MLX_DECLARE_STRUCT ( hermonprm_virtual_physical_mapping );
+struct MLX_DECLARE_STRUCT ( hermonprm_wqe_segment_ctrl_send );
+struct MLX_DECLARE_STRUCT ( hermonprm_wqe_segment_data_ptr );
+struct MLX_DECLARE_STRUCT ( hermonprm_wqe_segment_ud );
+
+/*
+ * Composite hardware datatypes
+ *
+ */
+
+/** Parameter block for a WRITE_MTT command: MTT base address, a
+ * reserved dword pair, and the MTT entry itself */
+struct hermonprm_write_mtt {
+       struct hermonprm_scalar_parameter mtt_base_addr;
+       struct hermonprm_scalar_parameter reserved;
+       struct hermonprm_mtt mtt;
+} __attribute__ (( packed ));
+
+/** Number of gather (data) segments per send WQE */
+#define HERMON_MAX_GATHER 1
+
+/** A UD send work queue entry */
+struct hermonprm_ud_send_wqe {
+       struct hermonprm_wqe_segment_ctrl_send ctrl;
+       struct hermonprm_wqe_segment_ud ud;
+       struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_GATHER];
+} __attribute__ (( packed ));
+
+/** Number of scatter (data) segments per receive WQE */
+#define HERMON_MAX_SCATTER 1
+
+/** A receive work queue entry */
+struct hermonprm_recv_wqe {
+       struct hermonprm_wqe_segment_data_ptr data[HERMON_MAX_SCATTER];
+} __attribute__ (( packed ));
+
+/** A completion queue entry, viewed as either a normal or an error
+ * completion */
+union hermonprm_completion_entry {
+       struct hermonprm_completion_queue_entry normal;
+       struct hermonprm_completion_with_error error;
+} __attribute__ (( packed ));
+
+/** A doorbell register, viewed as a structured send doorbell or as a
+ * raw dword */
+union hermonprm_doorbell_register {
+       struct hermonprm_send_db_register send;
+       uint32_t dword[1];
+} __attribute__ (( packed ));
+
+/** A MAD, viewed as either the hardware MAD_IFC layout or a generic
+ * Infiniband MAD */
+union hermonprm_mad {
+       struct hermonprm_mad_ifc ifc;
+       union ib_mad mad;
+} __attribute__ (( packed ));
+
+/*
+ * gPXE-specific definitions
+ *
+ */
+
+/** Hermon device capabilities, as extracted from QUERY_DEV_CAP */
+struct hermon_dev_cap {
+       /** CMPT entry size */
+       size_t cmpt_entry_size;
+       /** Number of reserved QPs */
+       unsigned int reserved_qps;
+       /** QP context entry size */
+       size_t qpc_entry_size;
+       /** Alternate path context entry size */
+       size_t altc_entry_size;
+       /** Auxiliary context entry size */
+       size_t auxc_entry_size;
+       /** Number of reserved SRQs */
+       unsigned int reserved_srqs;
+       /** SRQ context entry size */
+       size_t srqc_entry_size;
+       /** Number of reserved CQs */
+       unsigned int reserved_cqs;
+       /** CQ context entry size */
+       size_t cqc_entry_size;
+       /** Number of reserved EQs */
+       unsigned int reserved_eqs;
+       /** EQ context entry size */
+       size_t eqc_entry_size;
+       /** Number of reserved MTTs */
+       unsigned int reserved_mtts;
+       /** MTT entry size */
+       size_t mtt_entry_size;
+       /** Number of reserved MRWs */
+       unsigned int reserved_mrws;
+       /** DMPT entry size */
+       size_t dmpt_entry_size;
+       /** Number of reserved UARs */
+       unsigned int reserved_uars;
+};
+
+/** Number of cMPT entries of each type */
+#define HERMON_CMPT_MAX_ENTRIES ( 1 << 24 )
+
+/** Hermon ICM memory map entry */
+struct hermon_icm_map {
+       /** Offset (virtual address within ICM) */
+       uint64_t offset;
+       /** Length */
+       size_t len;
+};
+
+/** Discontiguous regions within Hermon ICM
+ *
+ * Region order matches the virtual address layout constructed by
+ * hermon_alloc_icm(): the four sparse cMPT regions, followed by
+ * everything else.
+ */
+enum hermon_icm_map_regions {
+       HERMON_ICM_QP_CMPT = 0,
+       HERMON_ICM_SRQ_CMPT,
+       HERMON_ICM_CQ_CMPT,
+       HERMON_ICM_EQ_CMPT,
+       HERMON_ICM_OTHER,
+       HERMON_ICM_NUM_REGIONS
+};
+
+/** UAR page for doorbell accesses
+ *
+ * Pages 0-127 are reserved for event queue doorbells only, so we use
+ * page 128.
+ */
+#define HERMON_UAR_PAGE                128
+
+/** Maximum number of allocatable MTT entries
+ *
+ * This is a policy decision, not a device limit.
+ */
+#define HERMON_MAX_MTTS                64
+
+/** A Hermon MTT descriptor
+ *
+ * Describes a run of MTT entries allocated to a queue.
+ * NOTE(review): the relationship between mtt_offset and
+ * mtt_base_addr is not visible here -- presumably
+ * base = offset * mtt_entry_size; confirm against the allocator.
+ */
+struct hermon_mtt {
+       /** MTT offset */
+       unsigned int mtt_offset;
+       /** Number of pages */
+       unsigned int num_pages;
+       /** MTT base address */
+       unsigned int mtt_base_addr;
+       /** Offset within page */
+       unsigned int page_offset;
+};
+
+/** Alignment of Hermon send work queue entries */
+#define HERMON_SEND_WQE_ALIGN 128
+
+/** A Hermon send work queue entry
+ *
+ * force_align pads the union so that consecutive entries are
+ * HERMON_SEND_WQE_ALIGN bytes apart.
+ */
+union hermon_send_wqe {
+       struct hermonprm_ud_send_wqe ud;
+       uint8_t force_align[HERMON_SEND_WQE_ALIGN];
+} __attribute__ (( packed ));
+
+/** A Hermon send work queue */
+struct hermon_send_work_queue {
+       /** Number of work queue entries, including headroom
+        *
+        * Hermon requires us to leave unused space within the send
+        * WQ, so we create a send WQ with more entries than are
+        * requested in the create_qp() call.
+        */
+       unsigned int num_wqes;
+       /** Work queue entries */
+       union hermon_send_wqe *wqe;
+       /** Size of work queue */
+       size_t wqe_size;
+};
+
+/** Alignment of Hermon receive work queue entries */
+#define HERMON_RECV_WQE_ALIGN 16
+
+/** A Hermon receive work queue entry
+ *
+ * force_align pads the union so that consecutive entries are
+ * HERMON_RECV_WQE_ALIGN bytes apart.
+ */
+union hermon_recv_wqe {
+       struct hermonprm_recv_wqe recv;
+       uint8_t force_align[HERMON_RECV_WQE_ALIGN];
+} __attribute__ (( packed ));
+
+/** A Hermon receive work queue */
+struct hermon_recv_work_queue {
+       /** Work queue entries */
+       union hermon_recv_wqe *wqe;
+       /** Size of work queue */
+       size_t wqe_size;
+       /** Doorbell */
+       struct hermonprm_qp_db_record doorbell __attribute__ (( aligned (4) ));
+};
+
+/** Maximum number of allocatable queue pairs
+ *
+ * This is a policy decision, not a device limit.
+ */
+#define HERMON_MAX_QPS         8
+
+/** Base queue pair number */
+#define HERMON_QPN_BASE 0x550000
+
+/** A Hermon queue pair */
+struct hermon_queue_pair {
+       /** Work queue buffer */
+       void *wqe;
+       /** Size of work queue buffer */
+       size_t wqe_size;
+       /** MTT descriptor */
+       struct hermon_mtt mtt;
+       /** Send work queue */
+       struct hermon_send_work_queue send;
+       /** Receive work queue */
+       struct hermon_recv_work_queue recv;
+};
+
+/** Maximum number of allocatable completion queues
+ *
+ * This is a policy decision, not a device limit.
+ */
+#define HERMON_MAX_CQS         8
+
+/** A Hermon completion queue */
+struct hermon_completion_queue {
+       /** Completion queue entries */
+       union hermonprm_completion_entry *cqe;
+       /** Size of completion queue */
+       size_t cqe_size;
+       /** MTT descriptor */
+       struct hermon_mtt mtt;
+       /** Doorbell
+        *
+        * NOTE(review): 8-byte alignment presumably required by the
+        * hardware's access to the doorbell record -- confirm.
+        */
+       struct hermonprm_cq_db_record doorbell __attribute__ (( aligned (8) ));
+};
+
+/** Maximum number of allocatable event queues
+ *
+ * This is a policy decision, not a device limit.
+ */
+#define HERMON_MAX_EQS         4
+
+/** A Hermon resource bitmask */
+typedef uint32_t hermon_bitmask_t;
+
+/** Size of a hermon resource bitmask
+ *
+ * Evaluates to the number of hermon_bitmask_t words needed to hold
+ * max_entries bits (rounded up).
+ */
+#define HERMON_BITMASK_SIZE(max_entries)                                    \
+       ( ( (max_entries) + ( 8 * sizeof ( hermon_bitmask_t ) ) - 1 ) /      \
+         ( 8 * sizeof ( hermon_bitmask_t ) ) )
+
/** A Hermon device
 *
 * Per-device driver state for a Mellanox Hermon (ConnectX) HCA.
 */
struct hermon {
	/** PCI configuration registers
	 *
	 * NOTE(review): mapping used for the HCR command registers
	 * (see HERMON_HCR_BASE) — confirm which BAR this is.
	 */
	void *config;
	/** PCI user Access Region
	 *
	 * NOTE(review): presumably used for ringing doorbells —
	 * confirm against hermon.c.
	 */
	void *uar;

	/** Command toggle
	 *
	 * NOTE(review): presumably the HCR ownership/toggle bit that
	 * alternates between commands — confirm in hermon_command().
	 */
	unsigned int toggle;
	/** Command input mailbox */
	void *mailbox_in;
	/** Command output mailbox */
	void *mailbox_out;

	/** Firmware area in external memory */
	userptr_t firmware_area;
	/** ICM map */
	struct hermon_icm_map icm_map[HERMON_ICM_NUM_REGIONS];
	/** ICM area */
	userptr_t icm;

	/** Reserved LKey
	 *
	 * Used to get unrestricted memory access.
	 */
	unsigned long reserved_lkey;

	/** Completion queue in-use bitmask */
	hermon_bitmask_t cq_inuse[ HERMON_BITMASK_SIZE ( HERMON_MAX_CQS ) ];
	/** Queue pair in-use bitmask */
	hermon_bitmask_t qp_inuse[ HERMON_BITMASK_SIZE ( HERMON_MAX_QPS ) ];
	/** MTT entry in-use bitmask */
	hermon_bitmask_t mtt_inuse[ HERMON_BITMASK_SIZE ( HERMON_MAX_MTTS ) ];

	/** Device capabilities */
	struct hermon_dev_cap cap;
};
+
/** Global protection domain */
#define HERMON_GLOBAL_PD		0x123456

/** Memory key prefix */
#define HERMON_MKEY_PREFIX		0x77000000UL

/*
 * HCA commands
 *
 */

/* Offset of the HCR register block within the config mapping
 * NOTE(review): confirm this offset against the ConnectX PRM.
 */
#define HERMON_HCR_BASE			0x80680
/* Byte offset of HCR dword register (x) */
#define HERMON_HCR_REG(x)		( HERMON_HCR_BASE + 4 * (x) )
/* Maximum time to wait for a command to complete, in milliseconds */
#define HERMON_HCR_MAX_WAIT_MS		2000
/* Command mailbox alignment in bytes — presumably a hardware
 * requirement; confirm against the PRM.
 */
#define HERMON_MBOX_ALIGN		4096
/* Command mailbox size in bytes */
#define HERMON_MBOX_SIZE		512
+
/* HCA command is split into
 *
 * bits  11:0  Opcode
 * bit     12  Input uses mailbox
 * bit     13  Output uses mailbox
 * bits 22:14  Input parameter length (in dwords)
 * bits 31:23  Output parameter length (in dwords)
 *
 * Encoding the information in this way allows us to cut out several
 * parameters to the hermon_command() call.
 */

/** Command uses an input mailbox (bit 12) */
#define HERMON_HCR_IN_MBOX		0x00001000UL
/** Command uses an output mailbox (bit 13) */
#define HERMON_HCR_OUT_MBOX		0x00002000UL
/** Extract opcode (bits 11:0) */
#define HERMON_HCR_OPCODE( _command )	( (_command) & 0xfff )
/** Extract input parameter length in bytes (dword count in bits 22:14,
 * shifted so the result is already multiplied by four)
 */
#define HERMON_HCR_IN_LEN( _command )	( ( (_command) >> 12 ) & 0x7fc )
/** Extract output parameter length in bytes (dword count in bits 31:23) */
#define HERMON_HCR_OUT_LEN( _command )	( ( (_command) >> 21 ) & 0x7fc )

/** Build HCR command from component parts
 *
 * Lengths are in bytes and must be multiples of four.  The length
 * terms are evaluated in unsigned long arithmetic: shifting a plain
 * int into bit 31 (any _out_len >= 1024) would be undefined
 * behaviour.
 */
#define HERMON_HCR_INOUT_CMD( _opcode, _in_mbox, _in_len,		     \
			     _out_mbox, _out_len )			     \
	( (_opcode) |							     \
	  ( (_in_mbox) ? HERMON_HCR_IN_MBOX : 0 ) |			     \
	  ( ( ( ( unsigned long ) (_in_len) ) / 4 ) << 14 ) |		     \
	  ( (_out_mbox) ? HERMON_HCR_OUT_MBOX : 0 ) |			     \
	  ( ( ( ( unsigned long ) (_out_len) ) / 4 ) << 23 ) )

/** Build HCR command taking input parameters only */
#define HERMON_HCR_IN_CMD( _opcode, _in_mbox, _in_len )			     \
	HERMON_HCR_INOUT_CMD ( _opcode, _in_mbox, _in_len, 0, 0 )

/** Build HCR command producing output parameters only */
#define HERMON_HCR_OUT_CMD( _opcode, _out_mbox, _out_len )		     \
	HERMON_HCR_INOUT_CMD ( _opcode, 0, 0, _out_mbox, _out_len )

/** Build HCR command with no parameters */
#define HERMON_HCR_VOID_CMD( _opcode )					     \
	HERMON_HCR_INOUT_CMD ( _opcode, 0, 0, 0, 0 )
+
+#endif /* _HERMON_H */
index c234d13..09ae64e 100644 (file)
 
 #define ERRFILE_scsi                ( ERRFILE_DRIVER | 0x00700000 )
 #define ERRFILE_arbel               ( ERRFILE_DRIVER | 0x00710000 )
+#define ERRFILE_hermon              ( ERRFILE_DRIVER | 0x00720000 )
 
 #define ERRFILE_aoe                    ( ERRFILE_NET | 0x00000000 )
 #define ERRFILE_arp                    ( ERRFILE_NET | 0x00010000 )