/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */


#include "hca_driver.h"
#include "hca_utils.h"

#if defined(EVENT_TRACING)
#ifdef offsetof
#undef offsetof
#endif
#include "hca_data.tmh"
#endif

#include "mthca_dev.h"
#include <ib_cache.h>

static cl_spinlock_t    hob_lock;

uint32_t                g_mlnx_dpc2thread = 0;

cl_qlist_t              mlnx_hca_list;

mlnx_hob_t              mlnx_hob_array[MLNX_NUM_HOBKL];         // kernel HOB - one per HCA (cmdif access)
mlnx_hobul_t    *mlnx_hobul_array[MLNX_NUM_HOBUL];      // kernel HOBUL - one per HCA (kar access)

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
void
mlnx_hca_insert(
        IN              mlnx_hca_t              *p_hca )
{
        cl_spinlock_acquire( &hob_lock );
        cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
        cl_spinlock_release( &hob_lock );
}

void
mlnx_hca_remove(
        IN              mlnx_hca_t              *p_hca )
{
        cl_spinlock_acquire( &hob_lock );
        cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
        cl_spinlock_release( &hob_lock );
}

mlnx_hca_t*
mlnx_hca_from_guid(
        IN              ib_net64_t              guid )
{
        cl_list_item_t  *p_item;
        mlnx_hca_t      *p_hca = NULL;

        cl_spinlock_acquire( &hob_lock );
        p_item = cl_qlist_head( &mlnx_hca_list );
        while( p_item != cl_qlist_end( &mlnx_hca_list ) )
        {
                p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
                if( p_hca->guid == guid )
                        break;
                p_item = cl_qlist_next( p_item );
                p_hca = NULL;
        }
        cl_spinlock_release( &hob_lock );
        return p_hca;
}
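
/*
 * Illustrative sketch only, not part of the driver: the intended use of the
 * HCA list helpers above. All three routines serialize on hob_lock, and
 * mlnx_hca_from_guid() does a linear walk, so the returned pointer is only
 * meaningful while the HCA is still registered.
 */
#if 0
static void
hca_list_usage_sketch(
        IN              mlnx_hca_t              *p_hca )
{
        mlnx_hca_t      *p_found;

        mlnx_hca_insert( p_hca );                       // at device add
        p_found = mlnx_hca_from_guid( p_hca->guid );    // NULL if not registered
        if( p_found )
        {
                // ... use p_found ...
        }
        mlnx_hca_remove( p_hca );                       // at device removal
}
#endif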

/*
void
mlnx_names_from_guid(
        IN              ib_net64_t              guid,
        OUT             char                    **hca_name_p,
        OUT             char                    **dev_name_p)
{
        unsigned int idx;

        if (!hca_name_p) return;
        if (!dev_name_p) return;

        for (idx = 0; idx < mlnx_num_hca; idx++)
        {
                if (mlnx_hca_array[idx].ifx.guid == guid)
                {
                        *hca_name_p = mlnx_hca_array[idx].hca_name_p;
                        *dev_name_p = mlnx_hca_array[idx].dev_name_p;
                }
        }
}
*/

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
cl_status_t
mlnx_hcas_init( void )
{
        cl_qlist_init( &mlnx_hca_list );
        return cl_spinlock_init( &hob_lock );
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_hobs_set_cb(
        IN              mlnx_hob_t              *hob_p,
        IN              ci_completion_cb_t      comp_cb_p,
        IN              ci_async_event_cb_t     async_cb_p,
        IN      const   void* const             ib_context)
{
        cl_status_t             cl_status;

        // Setup the callbacks
        if (!hob_p->async_proc_mgr_p)
        {
                hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
                if( !hob_p->async_proc_mgr_p )
                {
                        return IB_INSUFFICIENT_MEMORY;
                }
                cl_async_proc_construct( hob_p->async_proc_mgr_p );
                cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
                if( cl_status != CL_SUCCESS )
                {
                        cl_async_proc_destroy( hob_p->async_proc_mgr_p );
                        cl_free(hob_p->async_proc_mgr_p);
                        hob_p->async_proc_mgr_p = NULL;
                        return IB_INSUFFICIENT_RESOURCES;
                }
        }

        hob_p->comp_cb_p        = comp_cb_p;
        hob_p->async_cb_p = async_cb_p;
        hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hca_idx %d context 0x%p\n", (int)(hob_p - mlnx_hob_array), ib_context));
        return IB_SUCCESS;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_hobs_remove(
        IN              mlnx_hob_t              *hob_p)
{
        cl_async_proc_t *p_async_proc;

        cl_spinlock_acquire( &hob_lock );

        hob_p->mark = E_MARK_INVALID;

        p_async_proc = hob_p->async_proc_mgr_p;
        hob_p->async_proc_mgr_p = NULL;

        hob_p->comp_cb_p = NULL;
        hob_p->async_cb_p = NULL;
        hob_p->ca_context = NULL;
        hob_p->cl_device_h = NULL;

        cl_spinlock_release( &hob_lock );

        if( p_async_proc )
        {
                cl_async_proc_destroy( p_async_proc );
                cl_free( p_async_proc );
        }

        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hobs_remove idx %d \n", (int)(hob_p - mlnx_hob_array)));
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mthca_port_cap_to_ibal(
        IN              u32                     mthca_port_cap,
        OUT             ib_port_cap_t           *ibal_port_cap_p)
{
#define SET_CAP(flag,cap)       if (mthca_port_cap & flag) ibal_port_cap_p->cap = TRUE

        SET_CAP(IB_PORT_CM_SUP,cm);
        SET_CAP(IB_PORT_SNMP_TUNNEL_SUP,snmp);
        SET_CAP(IB_PORT_DEVICE_MGMT_SUP,dev_mgmt);
        SET_CAP(IB_PORT_VENDOR_CLASS_SUP,vend);
        SET_CAP(IB_PORT_SM_DISABLED,sm_disable);
        SET_CAP(IB_PORT_SM,sm);
        SET_CAP(IB_PORT_NOTICE_SUP,notice);
        SET_CAP(IB_PORT_TRAP_SUP,trap);
        SET_CAP(IB_PORT_AUTO_MIGR_SUP,apm);
        SET_CAP(IB_PORT_SL_MAP_SUP,slmap);
        SET_CAP(IB_PORT_LED_INFO_SUP,ledinfo);
        SET_CAP(IB_PORT_CAP_MASK_NOTICE_SUP,capm_notice);
        SET_CAP(IB_PORT_CLIENT_REG_SUP,client_reregister);
        SET_CAP(IB_PORT_SYS_IMAGE_GUID_SUP,sysguid);
        SET_CAP(IB_PORT_BOOT_MGMT_SUP,boot_mgmt);
        SET_CAP(IB_PORT_DR_NOTICE_SUP,dr_notice);
        SET_CAP(IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP,pkey_switch_ext_port);
        SET_CAP(IB_PORT_LINK_LATENCY_SUP,link_rtl);
        SET_CAP(IB_PORT_REINIT_SUP,reinit);
        SET_CAP(IB_PORT_OPT_IPD_SUP,ipd);
        SET_CAP(IB_PORT_MKEY_NVRAM,mkey_nvram);
        SET_CAP(IB_PORT_PKEY_NVRAM,pkey_nvram);
        // there are no MTHCA flags for the qkey_ctr, pkey_ctr, port_active and bm IBAL capabilities
}
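
/*
 * For reference: each SET_CAP(flag,cap) line above expands to a guarded
 * assignment, e.g. SET_CAP(IB_PORT_CM_SUP,cm) becomes
 *
 *      if (mthca_port_cap & IB_PORT_CM_SUP) ibal_port_cap_p->cm = TRUE;
 *
 * so capability fields whose flags are clear keep whatever value the caller
 * initialized them to (the caller below zeroes the struct with cl_memclr).
 */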


/////////////////////////////////////////////////////////
void
mlnx_conv_hca_cap(
        IN              struct ib_device *ib_dev,
        IN              struct ib_device_attr *hca_info_p,
        IN              struct ib_port_attr  *hca_ports,
        OUT             ib_ca_attr_t            *ca_attr_p)
{
        uint8_t         port_num;
        ib_port_attr_t  *ibal_port_p;
        struct ib_port_attr  *mthca_port_p;

        ca_attr_p->vend_id  = hca_info_p->vendor_id;
        ca_attr_p->dev_id   = (uint16_t)hca_info_p->vendor_part_id;
        ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;
        ca_attr_p->fw_ver = hca_info_p->fw_ver;
        ca_attr_p->ca_guid   = *(UNALIGNED64 uint64_t *)&ib_dev->node_guid;
        ca_attr_p->num_ports = ib_dev->phys_port_cnt;
        ca_attr_p->max_qps   = hca_info_p->max_qp;
        ca_attr_p->max_wrs   = hca_info_p->max_qp_wr;
        ca_attr_p->max_sges   = hca_info_p->max_sge;
        ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;
        ca_attr_p->max_cqs    = hca_info_p->max_cq;
        ca_attr_p->max_cqes  = hca_info_p->max_cqe;
        ca_attr_p->max_pds    = hca_info_p->max_pd;
        ca_attr_p->init_regions = hca_info_p->max_mr;
        ca_attr_p->init_windows = hca_info_p->max_mw;
        ca_attr_p->init_region_size = hca_info_p->max_mr_size;
        ca_attr_p->max_addr_handles = hca_info_p->max_ah;
        ca_attr_p->atomicity     = hca_info_p->atomic_cap;
        ca_attr_p->max_partitions = hca_info_p->max_pkeys;
        ca_attr_p->max_qp_resp_res = (uint8_t)hca_info_p->max_qp_rd_atom;
        ca_attr_p->max_resp_res    = (uint8_t)hca_info_p->max_res_rd_atom;
        ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;
        ca_attr_p->max_ipv6_qps    = hca_info_p->max_raw_ipv6_qp;
        ca_attr_p->max_ether_qps   = hca_info_p->max_raw_ethy_qp;
        ca_attr_p->max_mcast_grps  = hca_info_p->max_mcast_grp;
        ca_attr_p->max_mcast_qps   = hca_info_p->max_total_mcast_qp_attach;
        ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;
        ca_attr_p->max_fmr   = hca_info_p->max_fmr;
        ca_attr_p->max_map_per_fmr   = hca_info_p->max_map_per_fmr;
        ca_attr_p->max_srq = hca_info_p->max_srq;
        ca_attr_p->max_srq_wrs = hca_info_p->max_srq_wr;
        ca_attr_p->max_srq_sges = hca_info_p->max_srq_sge;
        ca_attr_p->system_image_guid = hca_info_p->sys_image_guid;

        ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;
        ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;
        ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;
        ca_attr_p->raw_mcast_support    = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;
        ca_attr_p->apm_support          = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;
        ca_attr_p->av_port_check        = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;
        ca_attr_p->change_primary_port  = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;
        ca_attr_p->modify_wr_depth      = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;
        ca_attr_p->modify_srq_depth     = hca_info_p->device_cap_flags & IB_DEVICE_SRQ_RESIZE;
        ca_attr_p->system_image_guid_support = hca_info_p->device_cap_flags & IB_DEVICE_SYS_IMAGE_GUID;
        ca_attr_p->hw_agents            = FALSE; // in the context of IBAL, the agent is implemented on the host

        ca_attr_p->num_page_sizes = 1;
        ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap

        for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num)
        {
                // Setup port pointers
                ibal_port_p = &ca_attr_p->p_port_attr[port_num];
                mthca_port_p = &hca_ports[port_num];

                // Port Capabilities
                cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
                mthca_port_cap_to_ibal(mthca_port_p->port_cap_flags, &ibal_port_p->cap);

                // Port Attributes
                ibal_port_p->port_num   = port_num + start_port(ib_dev);
                ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;
                ibal_port_p->lid        = cl_ntoh16(mthca_port_p->lid);
                ibal_port_p->lmc        = mthca_port_p->lmc;
                ibal_port_p->max_vls    = mthca_port_p->max_vl_num;
                ibal_port_p->sm_lid     = cl_ntoh16(mthca_port_p->sm_lid);
                ibal_port_p->sm_sl      = mthca_port_p->sm_sl;
                ibal_port_p->link_state = (mthca_port_p->state != 0) ? (uint8_t)mthca_port_p->state : IB_LINK_DOWN;
                ibal_port_p->num_gids   = (uint16_t)mthca_port_p->gid_tbl_len;
                ibal_port_p->num_pkeys  = mthca_port_p->pkey_tbl_len;
                ibal_port_p->pkey_ctr   = (uint16_t)mthca_port_p->bad_pkey_cntr;
                ibal_port_p->qkey_ctr   = (uint16_t)mthca_port_p->qkey_viol_cntr;
                ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz;
                ibal_port_p->mtu = (uint8_t)mthca_port_p->max_mtu;
                ibal_port_p->active_speed = mthca_port_p->active_speed;
                ibal_port_p->phys_state = mthca_port_p->phys_state;

                ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;
                // ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec
                HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",
                        ibal_port_p->port_num, cl_ntoh64(ibal_port_p->port_guid)));
        }
}
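
/*
 * Illustrative sketch only, not the driver's actual query path: the converter
 * above assumes the caller has already wired ca_attr_p so that p_port_attr
 * points at phys_port_cnt entries and each entry's p_gid_table is populated
 * (port_guid is read from p_gid_table[0]). dev_attr and port_attr_array below
 * are placeholders for the caller's device/port query results.
 */
#if 0
{
        size_t          size;
        ib_ca_attr_t    *ca_attr_p;

        size = sizeof(ib_ca_attr_t) + ib_dev->phys_port_cnt * sizeof(ib_port_attr_t);
        ca_attr_p = cl_malloc( size );
        cl_memclr( ca_attr_p, size );
        ca_attr_p->p_port_attr = (ib_port_attr_t *)(ca_attr_p + 1);
        // ... fill each p_port_attr[i].p_gid_table before converting ...
        mlnx_conv_hca_cap( ib_dev, &dev_attr, port_attr_array, ca_attr_p );
}
#endif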

void cq_comp_handler(struct ib_cq *cq, void *context)
{
        mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
        struct mthca_cq *mcq = (struct mthca_cq *)cq;
        HCA_ENTER(HCA_DBG_CQ);
        if (hob_p && hob_p->comp_cb_p) {
                HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
                (hob_p->comp_cb_p)(mcq->cq_context);
        }
        else {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));
        }
        HCA_EXIT(HCA_DBG_CQ);
}

void ca_event_handler(struct ib_event *ev, void *context)
{
        mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
        ib_event_rec_t event_rec;

        // prepare parameters
        event_rec.context = (void *)hob_p->ca_context;
        event_rec.trap.info.port_num = ev->element.port_num;
        event_rec.type = ev->event;
        if (event_rec.type > IB_AE_UNKNOWN) {
                // CL_ASSERT(0); // This shouldn't happen
                HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n",
                        event_rec.type, IB_AE_LOCAL_FATAL));
                event_rec.type = IB_AE_LOCAL_FATAL;
        }

        // call the user callback
        if (hob_p && hob_p->async_cb_p)
                (hob_p->async_cb_p)(&event_rec);
        else {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
        }
}

void srq_event_handler(struct ib_event *ev, void *context)
{
        mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
        ib_event_rec_t event_rec;
        struct mthca_srq *srq_p;

        // prepare parameters
        event_rec.type = ev->event;
        event_rec.vendor_specific = ev->vendor_specific;
        srq_p = (struct mthca_srq *)ev->element.srq;
        event_rec.context = srq_p->srq_context;

        // call the user callback (also check the callback pointer, which
        // mlnx_hobs_remove() may have cleared, as ca_event_handler does)
        if (hob_p && hob_p->async_cb_p)
                (hob_p->async_cb_p)(&event_rec);
        else {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
        }
}


void qp_event_handler(struct ib_event *ev, void *context)
{
        mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
        ib_event_rec_t event_rec;
        struct mthca_qp *qp_p;

        // prepare parameters
        event_rec.type = ev->event;
        event_rec.vendor_specific = ev->vendor_specific;
        qp_p = (struct mthca_qp *)ev->element.qp;
        event_rec.context = qp_p->qp_context;

        // call the user callback (guard the callback pointer as well)
        if (hob_p && hob_p->async_cb_p)
                (hob_p->async_cb_p)(&event_rec);
        else {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
        }
}

void cq_event_handler(struct ib_event *ev, void *context)
{
        mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
        ib_event_rec_t event_rec;
        struct mthca_cq *cq_p;

        // prepare parameters
        event_rec.type = ev->event;
        cq_p = (struct mthca_cq *)ev->element.cq;
        event_rec.context = cq_p->cq_context;

        // call the user callback (guard the callback pointer as well)
        if (hob_p && hob_p->async_cb_p)
                (hob_p->async_cb_p)(&event_rec);
        else {
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
        }
}

ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps)
{
#define MAP_QPS(val1,val2) case val1: ib_qps = val2; break
        ib_qp_state_t ib_qps;
        switch (qps) {
                MAP_QPS( IBQPS_RESET, IB_QPS_RESET );
                MAP_QPS( IBQPS_INIT, IB_QPS_INIT );
                MAP_QPS( IBQPS_RTR, IB_QPS_RTR );
                MAP_QPS( IBQPS_RTS, IB_QPS_RTS );
                MAP_QPS( IBQPS_SQD, IB_QPS_SQD );
                MAP_QPS( IBQPS_SQE, IB_QPS_SQERR );
                MAP_QPS( IBQPS_ERR, IB_QPS_ERROR );
                default:
                        HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped MTHCA qp_state %d\n", qps));
                        ib_qps = 0xffffffff;
        }
        return ib_qps;
}

enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps)
{
#define MAP_IBQPS(val1,val2) case val1: qps = val2; break
        enum ib_qp_state qps;
        switch (ib_qps) {
                MAP_IBQPS( IB_QPS_RESET, IBQPS_RESET );
                MAP_IBQPS( IB_QPS_INIT, IBQPS_INIT );
                MAP_IBQPS( IB_QPS_RTR, IBQPS_RTR );
                MAP_IBQPS( IB_QPS_RTS, IBQPS_RTS );
                MAP_IBQPS( IB_QPS_SQD, IBQPS_SQD );
                MAP_IBQPS( IB_QPS_SQD_DRAINING, IBQPS_SQD );
                MAP_IBQPS( IB_QPS_SQD_DRAINED, IBQPS_SQD );
                MAP_IBQPS( IB_QPS_SQERR, IBQPS_SQE );
                MAP_IBQPS( IB_QPS_ERROR, IBQPS_ERR );
                default:
                        HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped IBAL qp_state %d\n", ib_qps));
                        qps = 0xffffffff;
        }
        return qps;
}
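
/*
 * For reference: the two mappings above are inverse to each other, except
 * that IBAL's three SQD variants all collapse onto IBQPS_SQD, e.g.
 *
 *      mlnx_qps_to_ibal( mlnx_qps_from_ibal( IB_QPS_SQD_DRAINING ) )
 *
 * yields IB_QPS_SQD rather than IB_QPS_SQD_DRAINING. Unmapped values in
 * either direction log an error and return 0xffffffff.
 */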

ib_api_status_t
mlnx_conv_qp_modify_attr(
        IN       const  struct ib_qp *ib_qp_p,
        IN              ib_qp_type_t    qp_type,
        IN       const  ib_qp_mod_t *modify_attr_p,
        OUT     struct ib_qp_attr *qp_attr_p,
        OUT     int *qp_attr_mask_p
        )
{
        int err;
        ib_api_status_t         status = IB_SUCCESS;
        struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;

        RtlZeroMemory( qp_attr_p, sizeof *qp_attr_p );
        *qp_attr_mask_p = IB_QP_STATE;
        qp_attr_p->qp_state = mlnx_qps_from_ibal( modify_attr_p->req_state );

        // skipped cases
        if (qp_p->state == IBQPS_RESET && modify_attr_p->req_state != IB_QPS_INIT)
                return IB_NOT_DONE;

        switch (modify_attr_p->req_state) {
        case IB_QPS_RESET:
        case IB_QPS_ERROR:
        case IB_QPS_SQERR:
        case IB_QPS_TIME_WAIT:
                break;

        case IB_QPS_INIT:

                switch (qp_type) {
                        case IB_QPT_RELIABLE_CONN:
                        case IB_QPT_UNRELIABLE_CONN:
                                *qp_attr_mask_p |= IB_QP_PORT | IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS;
                                qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);
                                break;
                        case IB_QPT_UNRELIABLE_DGRM:
                        case IB_QPT_QP0:
                        case IB_QPT_QP1:
                        default:
                                *qp_attr_mask_p |= IB_QP_PORT | IB_QP_QKEY | IB_QP_PKEY_INDEX;
                                qp_attr_p->qkey          = cl_ntoh32 (modify_attr_p->state.init.qkey);
                                break;
                }

                // IB_QP_PORT
                qp_attr_p->port_num    = modify_attr_p->state.init.primary_port;

                // IB_QP_PKEY_INDEX
                qp_attr_p->pkey_index = modify_attr_p->state.init.pkey_index;

                break;

        case IB_QPS_RTR:
                /* modifying the WQE depth is not supported */
                if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||
                        modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )    {
                        status = IB_UNSUPPORTED;
                        break;
                }

                switch (qp_type) {
                        case IB_QPT_RELIABLE_CONN:
                                *qp_attr_mask_p |= /* required flags */
                                        IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |
                                        IB_QP_AV | IB_QP_PATH_MTU | IB_QP_MIN_RNR_TIMER;

                                // IB_QP_DEST_QPN
                                qp_attr_p->dest_qp_num          = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);

                                // IB_QP_RQ_PSN
                                qp_attr_p->rq_psn               = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);

                                // IB_QP_MAX_DEST_RD_ATOMIC
                                qp_attr_p->max_dest_rd_atomic   = modify_attr_p->state.rtr.resp_res;

                                // IB_QP_AV, IB_QP_PATH_MTU: Convert primary RC AV (mandatory)
                                err = mlnx_conv_ibal_av(ib_qp_p->device,
                                        &modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);
                                if (err) {
                                        status = IB_ERROR;
                                        break;
                                }
                                qp_attr_p->path_mtu             = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU
                                qp_attr_p->timeout              = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // local ack timeout
                                qp_attr_p->retry_cnt            = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt; // retry count
                                qp_attr_p->rnr_retry            = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt; // RNR retry count

                                // IB_QP_MIN_RNR_TIMER, required in RTR, optional in RTS.
                                qp_attr_p->min_rnr_timer         = modify_attr_p->state.rtr.rnr_nak_timeout;

                                // IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
                                if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
                                        *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;          /* optional flag */
                                        qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rtr.access_ctrl);
                                }

                                // IB_QP_ALT_PATH: Convert alternate RC AV
                                if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
                                        *qp_attr_mask_p |= IB_QP_ALT_PATH;      /* required flag */
                                        err = mlnx_conv_ibal_av(ib_qp_p->device,
                                                &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);
                                        if (err) {
                                                status = IB_ERROR;
                                                break;
                                        }
                                        qp_attr_p->alt_timeout           = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
                                }

                                // IB_QP_PKEY_INDEX
                                if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
                                        *qp_attr_mask_p |= IB_QP_PKEY_INDEX;
                                        qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
                                }
                                break;

                        case IB_QPT_UNRELIABLE_CONN:
                                *qp_attr_mask_p |= /* required flags */
                                        IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_AV | IB_QP_PATH_MTU;

                                // IB_QP_DEST_QPN
                                qp_attr_p->dest_qp_num          = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);

                                // IB_QP_RQ_PSN
                                qp_attr_p->rq_psn               = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);

                                // IB_QP_PATH_MTU
                                qp_attr_p->path_mtu             = modify_attr_p->state.rtr.primary_av.conn.path_mtu;

                                // IB_QP_AV: Convert primary AV (mandatory)
                                err = mlnx_conv_ibal_av(ib_qp_p->device,
                                        &modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);
                                if (err) {
                                        status = IB_ERROR;
                                        break;
                                }

                                // IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
                                if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
                                        *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;          /* optional flag */
                                        qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rtr.access_ctrl);
                                }

                                // IB_QP_ALT_PATH: Convert alternate AV
                                if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
                                        *qp_attr_mask_p |= IB_QP_ALT_PATH;      /* required flag */
                                        err = mlnx_conv_ibal_av(ib_qp_p->device,
                                                &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);
                                        if (err) {
                                                status = IB_ERROR;
                                                break;
                                        }
                                }

                                // IB_QP_PKEY_INDEX
                                if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
                                        *qp_attr_mask_p |= IB_QP_PKEY_INDEX;
                                        qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
                                }
                                break;

                        case IB_QPT_UNRELIABLE_DGRM:
                        case IB_QPT_QP0:
                        case IB_QPT_QP1:
                        default:
                                // IB_QP_PKEY_INDEX
                                if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
                                        *qp_attr_mask_p |= IB_QP_PKEY_INDEX;
                                        qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
                                }

                                // IB_QP_QKEY
                                if (modify_attr_p->state.rtr.opts & IB_MOD_QP_QKEY) {
                                        *qp_attr_mask_p |= IB_QP_QKEY;
                                        qp_attr_p->qkey          = cl_ntoh32 (modify_attr_p->state.rtr.qkey);
                                }
                                break;

                }
                break;

        case IB_QPS_RTS:
                /* modifying the WQE depth is not supported */
                if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
                        modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
                {
                        status = IB_UNSUPPORTED;
                        break;
                }

                switch (qp_type) {
                        case IB_QPT_RELIABLE_CONN:
                                if (qp_p->state != IBQPS_RTS)
                                        *qp_attr_mask_p |= /* required flags */
                                                IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC | IB_QP_TIMEOUT |
                                                IB_QP_RETRY_CNT | IB_QP_RNR_RETRY;

                                // IB_QP_MAX_QP_RD_ATOMIC
                                qp_attr_p->max_rd_atomic        = modify_attr_p->state.rts.init_depth;

                                // IB_QP_TIMEOUT
                                qp_attr_p->timeout               = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv

                                // IB_QP_RETRY_CNT
                                qp_attr_p->retry_cnt = modify_attr_p->state.rts.retry_cnt;

                                // IB_QP_RNR_RETRY
                                qp_attr_p->rnr_retry     = modify_attr_p->state.rts.rnr_retry_cnt;

                                // IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
                                        *qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC;
                                        qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;
                                }

#ifdef WIN_TO_BE_REMOVED
                //TODO: do we need this?
                // Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

                                // IB_QP_PKEY_INDEX
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_PKEY) {
                                        *qp_attr_mask_p |= IB_QP_PKEY_INDEX;
                                        qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;
                                }
#endif

                                // IB_QP_MIN_RNR_TIMER
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {
                                        *qp_attr_mask_p |= IB_QP_MIN_RNR_TIMER;
                                        qp_attr_p->min_rnr_timer         = modify_attr_p->state.rts.rnr_nak_timeout;
                                }

                                // IB_QP_PATH_MIG_STATE
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_APM_STATE) {
                                        *qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;
                                        qp_attr_p->path_mig_state =  modify_attr_p->state.rts.apm_state;
                                }

                                // IB_QP_ACCESS_FLAGS
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
                                        *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;          /* optional flags */
                                        qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rts.access_ctrl);
                                }

                                // IB_QP_ALT_PATH: Convert alternate RC AV
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
                                        *qp_attr_mask_p |= IB_QP_ALT_PATH;      /* optional flag */
                                        err = mlnx_conv_ibal_av(ib_qp_p->device,
                                                &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);
                                        if (err) {
                                                status = IB_ERROR;
                                                break;
                                        }
                                        qp_attr_p->alt_timeout           = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv
                                }
                                break;

                        case IB_QPT_UNRELIABLE_CONN:
                                if (qp_p->state != IBQPS_RTS)
                                        *qp_attr_mask_p |= /* required flags */
                                                IB_QP_SQ_PSN;

                                // IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
                                        *qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC;
                                        qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;
                                }

#ifdef WIN_TO_BE_REMOVED
                //TODO: do we need this?
                // Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

                                // IB_QP_PKEY_INDEX
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_PKEY) {
                                        *qp_attr_mask_p |= IB_QP_PKEY_INDEX;
                                        qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;
                                }
#endif

                                // IB_QP_PATH_MIG_STATE
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_APM_STATE) {
                                        *qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;
                                        qp_attr_p->path_mig_state =  modify_attr_p->state.rts.apm_state;
                                }

                                // IB_QP_ACCESS_FLAGS
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
                                        *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;          /* optional flags */
                                        qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rts.access_ctrl);
                                }

                                // IB_QP_ALT_PATH: Convert alternate AV
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
                                        *qp_attr_mask_p |= IB_QP_ALT_PATH;      /* optional flag */
                                        err = mlnx_conv_ibal_av(ib_qp_p->device,
                                                &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);
                                        if (err) {
                                                status = IB_ERROR;
                                                break;
                                        }
                                }
                                break;

                        case IB_QPT_UNRELIABLE_DGRM:
                        case IB_QPT_QP0:
                        case IB_QPT_QP1:
                        default:
                                if (qp_p->state != IBQPS_RTS)
                                        *qp_attr_mask_p |= /* required flags */
                                                IB_QP_SQ_PSN;

                                // IB_QP_QKEY
                                if (modify_attr_p->state.rts.opts & IB_MOD_QP_QKEY) {
                                        *qp_attr_mask_p |= IB_QP_QKEY;
                                        qp_attr_p->qkey          = cl_ntoh32 (modify_attr_p->state.rts.qkey);
                                }
                                break;

                }

                // IB_QP_SQ_PSN: common for all
                qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);
                //NB: IB_QP_CUR_STATE flag is not provisioned by IBAL
                break;

        case IB_QPS_SQD:
        case IB_QPS_SQD_DRAINING:
        case IB_QPS_SQD_DRAINED:
                *qp_attr_mask_p |= IB_QP_EN_SQD_ASYNC_NOTIFY;
                qp_attr_p->en_sqd_async_notify = (u8)modify_attr_p->state.sqd.sqd_event;
                HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM ,("IB_QP_EN_SQD_ASYNC_NOTIFY appears to be unsupported\n"));
                break;

        default:
                //NB: is this an error case that needs this message? Should we return an error instead?
                HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped qp_state %d\n", modify_attr_p->req_state));
                break;

        }

        return status;
}

int
mlnx_conv_ibal_av(
        IN              const   struct ib_device *ib_dev_p,
        IN              const   ib_av_attr_t            *ibal_av_p,
        OUT                     struct ib_ah_attr       *ah_attr_p)
{
        int err = 0;
        u8 port_num;
        u16 gid_index;

        ah_attr_p->port_num = ibal_av_p->port_num;
        ah_attr_p->sl   = ibal_av_p->sl;
        ah_attr_p->dlid = cl_ntoh16(ibal_av_p->dlid);
        //TODO: how is static_rate encoded?
        ah_attr_p->static_rate   =
                (ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);
        ah_attr_p->src_path_bits = ibal_av_p->path_bits; // PATH:

        /* For global destination or Multicast address: */
        if (ibal_av_p->grh_valid)
        {
                ah_attr_p->ah_flags |= IB_AH_GRH;
                ah_attr_p->grh.hop_limit     = ibal_av_p->grh.hop_limit;
                ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
                        &ah_attr_p->grh.traffic_class, &ah_attr_p->grh.flow_label );
                err = ib_find_cached_gid((struct ib_device *)ib_dev_p,
                        (union ib_gid   *)ibal_av_p->grh.src_gid.raw, &port_num, &gid_index);
                if (err) {
                        HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid failed %d (%#x). Using default:  sgid_index = 0\n", err, err));
                        gid_index = 0;
                }
                else if (port_num != ah_attr_p->port_num) {
                        HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid returned wrong port_num %u (Expected - %u). Using the expected.\n",
                                (u32)port_num, (u32)ah_attr_p->port_num));
                }
                ah_attr_p->grh.sgid_index = (u8)gid_index;
                RtlCopyMemory(ah_attr_p->grh.dgid.raw, ibal_av_p->grh.dest_gid.raw, sizeof(ah_attr_p->grh.dgid));
        }

        return err;
}
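
/*
 * For reference: the GRH's ver_class_flow word packs, per the IBA spec,
 * version (4 bits), traffic class (8 bits) and flow label (20 bits), most
 * significant first. A rough host-order sketch of the unpacking that
 * ib_grh_get_ver_class_flow() performs above (the real helper also handles
 * network byte order):
 *
 *      ver    = (ver_class_flow >> 28) & 0x0F;
 *      tclass = (uint8_t)((ver_class_flow >> 20) & 0xFF);
 *      flow   = ver_class_flow & 0x000FFFFF;
 */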

int
mlnx_conv_mthca_av(
        IN              const   struct ib_ah *ib_ah_p,
        OUT                     ib_av_attr_t            *ibal_av_p)
{
        int err = 0;
        struct ib_ud_header header;
        struct mthca_ah *ah_p = (struct mthca_ah *)ib_ah_p;
        struct ib_device *ib_dev_p = ib_ah_p->pd->device;
        struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;

        err = mthca_read_ah( dev_p, ah_p, &header);
        if (err)
                goto err_read_ah;

        // common part
        ibal_av_p->sl                   = header.lrh.service_level;
        mthca_get_av_params(ah_p, &ibal_av_p->port_num,
                &ibal_av_p->dlid, &ibal_av_p->static_rate, &ibal_av_p->path_bits );

        // GRH
        ibal_av_p->grh_valid = header.grh_present;
        if (ibal_av_p->grh_valid) {
                ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow(
                        header.grh.ip_version, header.grh.traffic_class, header.grh.flow_label );
                ibal_av_p->grh.hop_limit = header.grh.hop_limit;
                RtlCopyMemory(ibal_av_p->grh.src_gid.raw,
                        header.grh.source_gid.raw, sizeof(ibal_av_p->grh.src_gid));
                RtlCopyMemory(ibal_av_p->grh.dest_gid.raw,
                        header.grh.destination_gid.raw, sizeof(ibal_av_p->grh.dest_gid));
        }

        //TODO: unclear how to fill conn; note that the previous version didn't fill it either.

err_read_ah:
        return err;
}

void
mlnx_modify_ah(
        IN              const   struct ib_ah *ib_ah_p,
        IN      const   struct ib_ah_attr *ah_attr_p)
{
        struct ib_device *ib_dev_p = ib_ah_p->pd->device;
        struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;

        mthca_set_av_params(dev_p, (struct mthca_ah *)ib_ah_p, (struct ib_ah_attr *)ah_attr_p );
}