/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include "hca_driver.h"
#include "hca_utils.h"

#if defined(EVENT_TRACING)
#ifdef offsetof
#undef offsetof
#endif
#include "hca_data.tmh"
#endif

#include "mthca_dev.h"
#include <ib_cache.h>

static cl_spinlock_t	hob_lock;

uint32_t		g_mlnx_dpc2thread = 0;

cl_qlist_t		mlnx_hca_list;

mlnx_hob_t		mlnx_hob_array[MLNX_NUM_HOBKL];		// kernel HOB - one per HCA (cmdif access)
mlnx_hobul_t	*mlnx_hobul_array[MLNX_NUM_HOBUL];	// kernel HOBUL - one per HCA (kar access)

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
void
mlnx_hca_insert(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}

void
mlnx_hca_remove(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}

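/*
 * Look up a registered HCA by node GUID.  The HCA list is scanned
 * linearly under hob_lock; returns NULL if no HCA with the given GUID
 * was inserted via mlnx_hca_insert().
 */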
mlnx_hca_t*
mlnx_hca_from_guid(
	IN				ib_net64_t					guid )
{
	cl_list_item_t	*p_item;
	mlnx_hca_t		*p_hca = NULL;

	cl_spinlock_acquire( &hob_lock );
	p_item = cl_qlist_head( &mlnx_hca_list );
	while( p_item != cl_qlist_end( &mlnx_hca_list ) )
	{
		p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
		if( p_hca->guid == guid )
			break;
		p_item = cl_qlist_next( p_item );
		p_hca = NULL;
	}
	cl_spinlock_release( &hob_lock );
	return p_hca;
}

/*
void
mlnx_names_from_guid(
	IN				ib_net64_t					guid,
		OUT			char						**hca_name_p,
		OUT			char						**dev_name_p)
{
	unsigned int idx;

	if (!hca_name_p) return;
	if (!dev_name_p) return;

	for (idx = 0; idx < mlnx_num_hca; idx++)
	{
		if (mlnx_hca_array[idx].ifx.guid == guid)
		{
			*hca_name_p = mlnx_hca_array[idx].hca_name_p;
			*dev_name_p = mlnx_hca_array[idx].dev_name_p;
		}
	}
}
*/

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
cl_status_t
mlnx_hcas_init( void )
{
	cl_qlist_init( &mlnx_hca_list );
	return cl_spinlock_init( &hob_lock );
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
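/*
 * Register the IBAL completion and asynchronous-event callbacks for this
 * HOB.  The async processing manager (MLNX_NUM_CB_THR "CBthread" worker
 * threads) is allocated lazily on the first call and freed in
 * mlnx_hobs_remove().
 */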
ib_api_status_t
mlnx_hobs_set_cb(
	IN				mlnx_hob_t					*hob_p,
	IN				ci_completion_cb_t			comp_cb_p,
	IN				ci_async_event_cb_t			async_cb_p,
	IN		const	void* const					ib_context)
{
	cl_status_t		cl_status;

	// Setup the callbacks
	if (!hob_p->async_proc_mgr_p)
	{
		hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
		if( !hob_p->async_proc_mgr_p )
		{
			return IB_INSUFFICIENT_MEMORY;
		}
		cl_async_proc_construct( hob_p->async_proc_mgr_p );
		cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
		if( cl_status != CL_SUCCESS )
		{
			cl_async_proc_destroy( hob_p->async_proc_mgr_p );
			cl_free(hob_p->async_proc_mgr_p);
			hob_p->async_proc_mgr_p = NULL;
			return IB_INSUFFICIENT_RESOURCES;
		}
	}

	hob_p->comp_cb_p	= comp_cb_p;
	hob_p->async_cb_p = async_cb_p;
	hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hca_idx %d context 0x%p\n", (int)(hob_p - mlnx_hob_array), ib_context));
	return IB_SUCCESS;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
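/*
 * Invalidate a HOB and clear its callbacks.  The fields are reset under
 * hob_lock, but the async processing manager is destroyed only after the
 * lock is released, presumably because cl_async_proc_destroy() may wait
 * for its worker threads and must not run under a spinlock.
 */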
void
mlnx_hobs_remove(
	IN				mlnx_hob_t					*hob_p)
{
	cl_async_proc_t *p_async_proc;

	cl_spinlock_acquire( &hob_lock );

	hob_p->mark = E_MARK_INVALID;

	p_async_proc = hob_p->async_proc_mgr_p;
	hob_p->async_proc_mgr_p = NULL;

	hob_p->comp_cb_p = NULL;
	hob_p->async_cb_p = NULL;
	hob_p->ca_context = NULL;
	hob_p->cl_device_h = NULL;

	cl_spinlock_release( &hob_lock );

	if( p_async_proc )
	{
		cl_async_proc_destroy( p_async_proc );
		cl_free( p_async_proc );
	}

	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hobs_remove idx %d \n", (int)(hob_p - mlnx_hob_array)));
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
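/*
 * Translate the bit mask of MTHCA port capability flags into the
 * per-capability boolean fields of the IBAL ib_port_cap_t structure.
 */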
void
mthca_port_cap_to_ibal(
	IN				u32						mthca_port_cap,
		OUT			ib_port_cap_t				*ibal_port_cap_p)
{
#define SET_CAP(flag,cap)	if (mthca_port_cap & flag) ibal_port_cap_p->cap = TRUE

	SET_CAP(IB_PORT_CM_SUP,cm);
	SET_CAP(IB_PORT_SNMP_TUNNEL_SUP,snmp);
	SET_CAP(IB_PORT_DEVICE_MGMT_SUP,dev_mgmt);
	SET_CAP(IB_PORT_VENDOR_CLASS_SUP,vend);
	SET_CAP(IB_PORT_SM_DISABLED,sm_disable);
	SET_CAP(IB_PORT_SM,sm);
	SET_CAP(IB_PORT_NOTICE_SUP,notice);
	SET_CAP(IB_PORT_TRAP_SUP,trap);
	SET_CAP(IB_PORT_AUTO_MIGR_SUP,apm);
	SET_CAP(IB_PORT_SL_MAP_SUP,slmap);
	SET_CAP(IB_PORT_LED_INFO_SUP,ledinfo);
	SET_CAP(IB_PORT_CAP_MASK_NOTICE_SUP,capm_notice);
	SET_CAP(IB_PORT_CLIENT_REG_SUP,client_reregister);
	SET_CAP(IB_PORT_SYS_IMAGE_GUID_SUP,sysguid);
	SET_CAP(IB_PORT_BOOT_MGMT_SUP,boot_mgmt);
	SET_CAP(IB_PORT_DR_NOTICE_SUP,dr_notice);
	SET_CAP(IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP,pkey_switch_ext_port);
	SET_CAP(IB_PORT_LINK_LATENCY_SUP,link_rtl);
	SET_CAP(IB_PORT_REINIT_SUP,reinit);
	SET_CAP(IB_PORT_OPT_IPD_SUP,ipd);
	SET_CAP(IB_PORT_MKEY_NVRAM,mkey_nvram);
	SET_CAP(IB_PORT_PKEY_NVRAM,pkey_nvram);
	// there are no MTHCA flags for the qkey_ctr, pkey_ctr, port_active and bm IBAL capabilities
}

/////////////////////////////////////////////////////////
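/*
 * Convert the device and per-port attributes reported by the low-level
 * driver into the ib_ca_attr_t layout that IBAL expects.  The caller is
 * responsible for sizing ca_attr_p so that p_port_attr[] is valid and for
 * populating each port's GID table beforehand (the port GUID is taken
 * from entry 0 of p_gid_table).
 */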
void
mlnx_conv_hca_cap(
	IN				struct ib_device *ib_dev,
	IN				struct ib_device_attr *hca_info_p,
	IN				struct ib_port_attr  *hca_ports,
	OUT			ib_ca_attr_t				*ca_attr_p)
{
	uint8_t			port_num;
	ib_port_attr_t	*ibal_port_p;
	struct ib_port_attr  *mthca_port_p;

	ca_attr_p->vend_id  = hca_info_p->vendor_id;
	ca_attr_p->dev_id   = (uint16_t)hca_info_p->vendor_part_id;
	ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;
	ca_attr_p->fw_ver = hca_info_p->fw_ver;
	ca_attr_p->ca_guid   = *(UNALIGNED64 uint64_t *)&ib_dev->node_guid;
	ca_attr_p->num_ports = ib_dev->phys_port_cnt;
	ca_attr_p->max_qps   = hca_info_p->max_qp;
	ca_attr_p->max_wrs   = hca_info_p->max_qp_wr;
	ca_attr_p->max_sges   = hca_info_p->max_sge;
	ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;
	ca_attr_p->max_cqs    = hca_info_p->max_cq;
	ca_attr_p->max_cqes  = hca_info_p->max_cqe;
	ca_attr_p->max_pds    = hca_info_p->max_pd;
	ca_attr_p->init_regions = hca_info_p->max_mr;
	ca_attr_p->init_windows = hca_info_p->max_mw;
	ca_attr_p->init_region_size = hca_info_p->max_mr_size;
	ca_attr_p->max_addr_handles = hca_info_p->max_ah;
	ca_attr_p->atomicity     = hca_info_p->atomic_cap;
	ca_attr_p->max_partitions = hca_info_p->max_pkeys;
	ca_attr_p->max_qp_resp_res = (uint8_t)hca_info_p->max_qp_rd_atom;
	ca_attr_p->max_resp_res    = (uint8_t)hca_info_p->max_res_rd_atom;
	ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;
	ca_attr_p->max_ipv6_qps    = hca_info_p->max_raw_ipv6_qp;
	ca_attr_p->max_ether_qps   = hca_info_p->max_raw_ethy_qp;
	ca_attr_p->max_mcast_grps  = hca_info_p->max_mcast_grp;
	ca_attr_p->max_mcast_qps   = hca_info_p->max_total_mcast_qp_attach;
	ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;
	ca_attr_p->max_fmr   = hca_info_p->max_fmr;
	ca_attr_p->max_map_per_fmr   = hca_info_p->max_map_per_fmr;
	ca_attr_p->max_srq = hca_info_p->max_srq;
	ca_attr_p->max_srq_wrs = hca_info_p->max_srq_wr;
	ca_attr_p->max_srq_sges = hca_info_p->max_srq_sge;

	ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;
	ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;
	ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;
	ca_attr_p->raw_mcast_support    = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;
	ca_attr_p->apm_support          = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;
	ca_attr_p->av_port_check        = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;
	ca_attr_p->change_primary_port  = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;
	ca_attr_p->modify_wr_depth      = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;
	ca_attr_p->modify_srq_depth     = hca_info_p->device_cap_flags & IB_DEVICE_SRQ_RESIZE;
	ca_attr_p->hw_agents            = FALSE; // in the IBAL context the agent is implemented on the host

	ca_attr_p->num_page_sizes = 1;
	ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap

	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num)
	{
		// Setup port pointers
		ibal_port_p = &ca_attr_p->p_port_attr[port_num];
		mthca_port_p = &hca_ports[port_num];

		// Port capabilities
		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
		mthca_port_cap_to_ibal(mthca_port_p->port_cap_flags, &ibal_port_p->cap);

		// Port attributes
		ibal_port_p->port_num   = port_num + start_port(ib_dev);
		ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;
		ibal_port_p->lid        = cl_ntoh16(mthca_port_p->lid);
		ibal_port_p->lmc        = mthca_port_p->lmc;
		ibal_port_p->max_vls    = mthca_port_p->max_vl_num;
		ibal_port_p->sm_lid     = cl_ntoh16(mthca_port_p->sm_lid);
		ibal_port_p->sm_sl      = mthca_port_p->sm_sl;
		ibal_port_p->link_state = (mthca_port_p->state != 0) ? (uint8_t)mthca_port_p->state : IB_LINK_DOWN;
		ibal_port_p->num_gids   = (uint16_t)mthca_port_p->gid_tbl_len;
		ibal_port_p->num_pkeys  = mthca_port_p->pkey_tbl_len;
		ibal_port_p->pkey_ctr   = (uint16_t)mthca_port_p->bad_pkey_cntr;
		ibal_port_p->qkey_ctr   = (uint16_t)mthca_port_p->qkey_viol_cntr;
		ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz;
		ibal_port_p->mtu = (uint8_t)mthca_port_p->max_mtu;
		ibal_port_p->active_speed = mthca_port_p->active_speed;
		ibal_port_p->phys_state = mthca_port_p->phys_state;

		ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;
		// ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",
			ibal_port_p->port_num, cl_ntoh64(ibal_port_p->port_guid)));
	}
}

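/*
 * Completion event handler registered with the low-level driver for every
 * CQ.  Forwards the event to the IBAL completion callback stored in the
 * HOB, passing the CQ consumer's context.
 */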
void cq_comp_handler(struct ib_cq *cq, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	struct mthca_cq *mcq = (struct mthca_cq *)cq;
	HCA_ENTER(HCA_DBG_CQ);
	if (hob_p && hob_p->comp_cb_p) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
		(hob_p->comp_cb_p)(mcq->cq_context);
	}
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));
	}
	HCA_EXIT(HCA_DBG_CQ);
}

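/*
 * CA-level asynchronous event handler.  Repackages the verbs ib_event as
 * an IBAL ib_event_rec_t; event codes beyond IB_AE_UNKNOWN have no IBAL
 * equivalent and are reported as IB_AE_LOCAL_FATAL.
 */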
void ca_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	ib_event_rec_t event_rec;

	// validate the context before dereferencing it
	if (!hob_p || !hob_p->async_cb_p) {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
		return;
	}

	// prepare parameters
	event_rec.context = (void *)hob_p->ca_context;
	event_rec.trap.info.port_num = ev->element.port_num;
	event_rec.type = ev->event;
	if (event_rec.type > IB_AE_UNKNOWN) {
		// CL_ASSERT(0); // This shouldn't happen
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n",
			event_rec.type, IB_AE_LOCAL_FATAL));
		event_rec.type = IB_AE_LOCAL_FATAL;
	}

	// call the user callback
	(hob_p->async_cb_p)(&event_rec);
}

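/*
 * Per-object asynchronous event handlers (SRQ, QP and CQ).  Each recovers
 * the consumer context from the affected object and forwards the event to
 * the IBAL async callback registered for the HCA.
 */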
void srq_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	ib_event_rec_t event_rec;
	struct mthca_srq *srq_p;

	// prepare parameters
	event_rec.type = ev->event;
	event_rec.vendor_specific = ev->vendor_specific;
	srq_p = (struct mthca_srq *)ev->element.srq;
	event_rec.context = srq_p->srq_context;

	// call the user callback
	if (hob_p && hob_p->async_cb_p)
		(hob_p->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

void qp_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	ib_event_rec_t event_rec;
	struct mthca_qp *qp_p;

	// prepare parameters
	event_rec.type = ev->event;
	event_rec.vendor_specific = ev->vendor_specific;
	qp_p = (struct mthca_qp *)ev->element.qp;
	event_rec.context = qp_p->qp_context;

	// call the user callback
	if (hob_p && hob_p->async_cb_p)
		(hob_p->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

void cq_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	ib_event_rec_t event_rec;
	struct mthca_cq *cq_p;

	// prepare parameters
	event_rec.type = ev->event;
	cq_p = (struct mthca_cq *)ev->element.cq;
	event_rec.context = cq_p->cq_context;

	// call the user callback
	if (hob_p && hob_p->async_cb_p)
		(hob_p->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

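/*
 * QP state translation helpers.  The mapping is one-to-one in both
 * directions, except that IB_QPS_SQD_DRAINING and IB_QPS_SQD_DRAINED both
 * collapse to IBQPS_SQD; unmapped values are flagged with 0xffffffff.
 */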
ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps)
{
#define MAP_QPS(val1,val2) case val1: ib_qps = val2; break
	ib_qp_state_t ib_qps;
	switch (qps) {
		MAP_QPS( IBQPS_RESET, IB_QPS_RESET );
		MAP_QPS( IBQPS_INIT, IB_QPS_INIT );
		MAP_QPS( IBQPS_RTR, IB_QPS_RTR );
		MAP_QPS( IBQPS_RTS, IB_QPS_RTS );
		MAP_QPS( IBQPS_SQD, IB_QPS_SQD );
		MAP_QPS( IBQPS_SQE, IB_QPS_SQERR );
		MAP_QPS( IBQPS_ERR, IB_QPS_ERROR );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped MTHCA qp_state %d\n", qps));
			ib_qps = 0xffffffff;
	}
	return ib_qps;
}

enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps)
{
#define MAP_IBQPS(val1,val2) case val1: qps = val2; break
	enum ib_qp_state qps;
	switch (ib_qps) {
		MAP_IBQPS( IB_QPS_RESET, IBQPS_RESET );
		MAP_IBQPS( IB_QPS_INIT, IBQPS_INIT );
		MAP_IBQPS( IB_QPS_RTR, IBQPS_RTR );
		MAP_IBQPS( IB_QPS_RTS, IBQPS_RTS );
		MAP_IBQPS( IB_QPS_SQD, IBQPS_SQD );
		MAP_IBQPS( IB_QPS_SQD_DRAINING, IBQPS_SQD );
		MAP_IBQPS( IB_QPS_SQD_DRAINED, IBQPS_SQD );
		MAP_IBQPS( IB_QPS_SQERR, IBQPS_SQE );
		MAP_IBQPS( IB_QPS_ERROR, IBQPS_ERR );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped IBAL qp_state %d\n", ib_qps));
			qps = 0xffffffff;
	}
	return qps;
}

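/*
 * Build a verbs-style (ib_qp_attr, attr_mask) pair from an IBAL
 * ib_qp_mod_t request.  IB_QP_STATE is always set in the mask; the other
 * required and optional attributes depend on both the target state and
 * the QP transport type, mirroring the modify-QP attribute tables of the
 * IB spec.
 */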
ib_api_status_t
mlnx_conv_qp_modify_attr(
	IN		const	struct ib_qp *ib_qp_p,
	IN				ib_qp_type_t	qp_type,
	IN		const	ib_qp_mod_t *modify_attr_p,
	OUT				struct ib_qp_attr *qp_attr_p,
	OUT				int *qp_attr_mask_p
	)
{
	int err;
	ib_api_status_t		status = IB_SUCCESS;
	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;

	RtlZeroMemory( qp_attr_p, sizeof *qp_attr_p );
	*qp_attr_mask_p = IB_QP_STATE;
	qp_attr_p->qp_state = mlnx_qps_from_ibal( modify_attr_p->req_state );

	// skipped cases: from RESET, only the transition to INIT is handled here
	if (qp_p->state == IBQPS_RESET && modify_attr_p->req_state != IB_QPS_INIT)
		return IB_NOT_DONE;

	switch (modify_attr_p->req_state) {
	case IB_QPS_RESET:
	case IB_QPS_ERROR:
	case IB_QPS_SQERR:
	case IB_QPS_TIME_WAIT:
		break;

	case IB_QPS_INIT:

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
			case IB_QPT_UNRELIABLE_CONN:
				*qp_attr_mask_p |= IB_QP_PORT | IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS;
				qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);
				break;
			case IB_QPT_UNRELIABLE_DGRM:
			case IB_QPT_QP0:
			case IB_QPT_QP1:
			default:
				*qp_attr_mask_p |= IB_QP_PORT | IB_QP_QKEY | IB_QP_PKEY_INDEX;
				qp_attr_p->qkey		 = cl_ntoh32 (modify_attr_p->state.init.qkey);
				break;
		}

		// IB_QP_PORT
		qp_attr_p->port_num    = modify_attr_p->state.init.primary_port;

		// IB_QP_PKEY_INDEX
		qp_attr_p->pkey_index = modify_attr_p->state.init.pkey_index;

		break;

	case IB_QPS_RTR:
		/* modifying the WQE depth is not supported */
		if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||
			modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )	{
			status = IB_UNSUPPORTED;
			break;
		}

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
				*qp_attr_mask_p |= /* required flags */
					IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |
					IB_QP_AV | IB_QP_PATH_MTU | IB_QP_MIN_RNR_TIMER;

				// IB_QP_DEST_QPN
				qp_attr_p->dest_qp_num		= cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);

				// IB_QP_RQ_PSN
				qp_attr_p->rq_psn			= cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);

				// IB_QP_MAX_DEST_RD_ATOMIC
				qp_attr_p->max_dest_rd_atomic	= modify_attr_p->state.rtr.resp_res;

				// IB_QP_AV, IB_QP_PATH_MTU: Convert primary RC AV (mandatory)
				err = mlnx_conv_ibal_av(ib_qp_p->device,
					&modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}
				qp_attr_p->path_mtu		= modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU
				qp_attr_p->timeout		= modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // local ack timeout
				qp_attr_p->retry_cnt		= modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt; // retry count
				qp_attr_p->rnr_retry		= modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt; // RNR retry count

				// IB_QP_MIN_RNR_TIMER, required in RTR, optional in RTS.
				qp_attr_p->min_rnr_timer	 = modify_attr_p->state.rtr.rnr_nak_timeout;

				// IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
				if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
					*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;		/* optional flag */
					qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rtr.access_ctrl);
				}

				// IB_QP_ALT_PATH: Convert alternate RC AV
				if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
					*qp_attr_mask_p |= IB_QP_ALT_PATH;	/* required flag */
					err = mlnx_conv_ibal_av(ib_qp_p->device,
						&modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
					qp_attr_p->alt_timeout		 = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
				}

				// IB_QP_PKEY_INDEX
				if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
					*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
					qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
				}
				break;

			case IB_QPT_UNRELIABLE_CONN:
				*qp_attr_mask_p |= /* required flags */
					IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_AV | IB_QP_PATH_MTU;

				// IB_QP_DEST_QPN
				qp_attr_p->dest_qp_num		= cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);

				// IB_QP_RQ_PSN
				qp_attr_p->rq_psn			= cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);

				// IB_QP_PATH_MTU
				qp_attr_p->path_mtu		= modify_attr_p->state.rtr.primary_av.conn.path_mtu;

				// IB_QP_AV: Convert primary AV (mandatory)
				err = mlnx_conv_ibal_av(ib_qp_p->device,
					&modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}

				// IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
				if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
					*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;		/* optional flag */
					qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rtr.access_ctrl);
				}

				// IB_QP_ALT_PATH: Convert alternate AV
				if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
					*qp_attr_mask_p |= IB_QP_ALT_PATH;	/* required flag */
					err = mlnx_conv_ibal_av(ib_qp_p->device,
						&modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
				}

				// IB_QP_PKEY_INDEX
				if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
					*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
					qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
				}
				break;

			case IB_QPT_UNRELIABLE_DGRM:
			case IB_QPT_QP0:
			case IB_QPT_QP1:
			default:
				// IB_QP_PKEY_INDEX
				if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
					*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
					qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
				}

				// IB_QP_QKEY
				if (modify_attr_p->state.rtr.opts & IB_MOD_QP_QKEY) {
					*qp_attr_mask_p |= IB_QP_QKEY;
					qp_attr_p->qkey		 = cl_ntoh32 (modify_attr_p->state.rtr.qkey);
				}
				break;

		}
		break;

	case IB_QPS_RTS:
		/* modifying the WQE depth is not supported */
		if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
			modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
		{
			status = IB_UNSUPPORTED;
			break;
		}

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
				if (qp_p->state != IBQPS_RTS)
					*qp_attr_mask_p |= /* required flags */
						IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC | IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT | IB_QP_RNR_RETRY;

				// IB_QP_MAX_QP_RD_ATOMIC
				qp_attr_p->max_rd_atomic	= modify_attr_p->state.rts.init_depth;

				// IB_QP_TIMEOUT
				qp_attr_p->timeout		 = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv

				// IB_QP_RETRY_CNT
				qp_attr_p->retry_cnt = modify_attr_p->state.rts.retry_cnt;

				// IB_QP_RNR_RETRY
				qp_attr_p->rnr_retry	 = modify_attr_p->state.rts.rnr_retry_cnt;

				// IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
					*qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC;
					qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;
				}

#ifdef WIN_TO_BE_REMOVED
		// TODO: do we need this?
		// Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

				// IB_QP_PKEY_INDEX
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_PKEY) {
					*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
					qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;
				}
#endif

				// IB_QP_MIN_RNR_TIMER
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {
					*qp_attr_mask_p |= IB_QP_MIN_RNR_TIMER;
					qp_attr_p->min_rnr_timer	 = modify_attr_p->state.rts.rnr_nak_timeout;
				}

				// IB_QP_PATH_MIG_STATE
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_APM_STATE) {
					*qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;
					qp_attr_p->path_mig_state =  modify_attr_p->state.rts.apm_state;
				}

				// IB_QP_ACCESS_FLAGS
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
					*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;		/* optional flags */
					qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rts.access_ctrl);
				}

				// IB_QP_ALT_PATH: Convert alternate RC AV
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
					*qp_attr_mask_p |= IB_QP_ALT_PATH;	/* optional flag */
					err = mlnx_conv_ibal_av(ib_qp_p->device,
						&modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
					qp_attr_p->alt_timeout		 = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv
				}
				break;

			case IB_QPT_UNRELIABLE_CONN:
				if (qp_p->state != IBQPS_RTS)
					*qp_attr_mask_p |= /* required flags */
						IB_QP_SQ_PSN;

				// IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
					*qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC;
					qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;
				}

#ifdef WIN_TO_BE_REMOVED
		// TODO: do we need this?
		// Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

				// IB_QP_PKEY_INDEX
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_PKEY) {
					*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
					qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;
				}
#endif

				// IB_QP_PATH_MIG_STATE
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_APM_STATE) {
					*qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;
					qp_attr_p->path_mig_state =  modify_attr_p->state.rts.apm_state;
				}

				// IB_QP_ACCESS_FLAGS
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
					*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS;		/* optional flags */
					qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rts.access_ctrl);
				}

				// IB_QP_ALT_PATH: Convert alternate AV
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
					*qp_attr_mask_p |= IB_QP_ALT_PATH;	/* optional flag */
					err = mlnx_conv_ibal_av(ib_qp_p->device,
						&modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
				}
				break;

			case IB_QPT_UNRELIABLE_DGRM:
			case IB_QPT_QP0:
			case IB_QPT_QP1:
			default:
				if (qp_p->state != IBQPS_RTS)
					*qp_attr_mask_p |= /* required flags */
						IB_QP_SQ_PSN;

				// IB_QP_QKEY
				if (modify_attr_p->state.rts.opts & IB_MOD_QP_QKEY) {
					*qp_attr_mask_p |= IB_QP_QKEY;
					qp_attr_p->qkey		 = cl_ntoh32 (modify_attr_p->state.rts.qkey);
				}
				break;

		}

		// IB_QP_SQ_PSN: common for all
		qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);
		// NB: the IB_QP_CUR_STATE flag is not provisioned by IBAL
		break;

	case IB_QPS_SQD:
	case IB_QPS_SQD_DRAINING:
	case IB_QPS_SQD_DRAINED:
		*qp_attr_mask_p |= IB_QP_EN_SQD_ASYNC_NOTIFY;
		qp_attr_p->en_sqd_async_notify = (u8)modify_attr_p->state.sqd.sqd_event;
		HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM ,("IB_QP_EN_SQD_ASYNC_NOTIFY appears to be unsupported\n"));
		break;

	default:
		// NB: is this an error case that needs this message? Should we return an error?
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped qp_state %d\n", modify_attr_p->req_state));
		break;

	}

	return status;
}

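/*
 * Convert an IBAL address vector into a verbs ib_ah_attr.  When a GRH is
 * present, the source GID is resolved to an SGID index through the GID
 * cache; on lookup failure the error is logged, index 0 is substituted
 * and the error code is propagated to the caller.
 */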
int
mlnx_conv_ibal_av(
	IN		const	struct ib_device *ib_dev_p,
	IN		const	ib_av_attr_t				*ibal_av_p,
	OUT			struct ib_ah_attr	*ah_attr_p)
{
	int err = 0;
	u8 port_num;
	u16 gid_index;

	ah_attr_p->port_num = ibal_av_p->port_num;
	ah_attr_p->sl	= ibal_av_p->sl;
	ah_attr_p->dlid = cl_ntoh16(ibal_av_p->dlid);
	// TODO: how is static_rate encoded?
	ah_attr_p->static_rate   =
		(ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);
	ah_attr_p->src_path_bits = ibal_av_p->path_bits; // PATH:

	/* For a global destination or multicast address: */
	if (ibal_av_p->grh_valid)
	{
		ah_attr_p->ah_flags |= IB_AH_GRH;
		ah_attr_p->grh.hop_limit     = ibal_av_p->grh.hop_limit;
		ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
			&ah_attr_p->grh.traffic_class, &ah_attr_p->grh.flow_label );
		err = ib_find_cached_gid((struct ib_device *)ib_dev_p,
			(union ib_gid	*)ibal_av_p->grh.src_gid.raw, &port_num, &gid_index);
		if (err) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid failed %d (%#x). Using default:  sgid_index = 0\n", err, err));
			gid_index = 0;
		}
		else if (port_num != ah_attr_p->port_num) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid returned wrong port_num %u (Expected - %u). Using the expected.\n",
				(u32)port_num, (u32)ah_attr_p->port_num));
		}
		ah_attr_p->grh.sgid_index = (u8)gid_index;
		RtlCopyMemory(ah_attr_p->grh.dgid.raw, ibal_av_p->grh.dest_gid.raw, sizeof(ah_attr_p->grh.dgid));
	}

	return err;
}

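/*
 * Convert a hardware AH back into an IBAL address vector by parsing the
 * UD header read from the device via mthca_read_ah().
 */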
int
mlnx_conv_mthca_av(
	IN		const	struct ib_ah *ib_ah_p,
	OUT			ib_av_attr_t				*ibal_av_p)
{
	int err = 0;
	struct ib_ud_header header;
	struct mthca_ah *ah_p = (struct mthca_ah *)ib_ah_p;
	struct ib_device *ib_dev_p = ib_ah_p->pd->device;
	struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;

	err = mthca_read_ah( dev_p, ah_p, &header);
	if (err)
		goto err_read_ah;

	// common part
	ibal_av_p->sl			= header.lrh.service_level;
	mthca_get_av_params(ah_p, &ibal_av_p->port_num,
		&ibal_av_p->dlid, &ibal_av_p->static_rate, &ibal_av_p->path_bits );

	// GRH
	ibal_av_p->grh_valid = header.grh_present;
	if (ibal_av_p->grh_valid) {
		ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow(
			header.grh.ip_version, header.grh.traffic_class, header.grh.flow_label );
		ibal_av_p->grh.hop_limit = header.grh.hop_limit;
		RtlCopyMemory(ibal_av_p->grh.src_gid.raw,
			header.grh.source_gid.raw, sizeof(ibal_av_p->grh.src_gid));
		RtlCopyMemory(ibal_av_p->grh.dest_gid.raw,
			header.grh.destination_gid.raw, sizeof(ibal_av_p->grh.dest_gid));
	}

	// TODO: unclear how to fill conn; note that the previous version did not fill it either

err_read_ah:
	return err;
}

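/*
 * Update an existing AH in place from a verbs ib_ah_attr.
 */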
void
mlnx_modify_ah(
	IN		const	struct ib_ah *ib_ah_p,
	IN		const	struct ib_ah_attr *ah_attr_p)
{
	struct ib_device *ib_dev_p = ib_ah_p->pd->device;
	struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;

	mthca_set_av_params(dev_p, (struct mthca_ah *)ib_ah_p, (struct ib_ah_attr *)ah_attr_p );
}