/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: data.c 1944 2007-02-12 16:16:00Z sleybo $
 */

#include "precomp.h"

#if defined(EVENT_TRACING)
#ifdef offsetof
#undef offsetof
#endif
#include "data.tmh"
#endif

static cl_spinlock_t	hca_lock;

uint32_t		g_mlnx_dpc2thread = 0;

cl_qlist_t		mlnx_hca_list;

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
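/*
 * Add an HCA to / remove an HCA from the global mlnx_hca_list,
 * serialized by hca_lock.
 */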
void
mlnx_hca_insert(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hca_lock );
	cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hca_lock );
}

void
mlnx_hca_remove(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hca_lock );
	cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hca_lock );
}

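/*
 * Find an HCA in the global list by its node GUID.
 * Returns NULL if no HCA with that GUID is registered.
 */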
mlnx_hca_t*
mlnx_hca_from_guid(
	IN				ib_net64_t					guid )
{
	cl_list_item_t	*p_item;
	mlnx_hca_t		*p_hca = NULL;

	cl_spinlock_acquire( &hca_lock );
	p_item = cl_qlist_head( &mlnx_hca_list );
	while( p_item != cl_qlist_end( &mlnx_hca_list ) )
	{
		p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
		if( p_hca->guid == guid )
			break;
		p_item = cl_qlist_next( p_item );
		p_hca = NULL;
	}
	cl_spinlock_release( &hca_lock );
	return p_hca;
}

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
cl_status_t
mlnx_hcas_init( void )
{
	cl_qlist_init( &mlnx_hca_list );
	return cl_spinlock_init( &hca_lock );
}


/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
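/*
 * Register the IBAL completion and async-event callbacks for this HCA,
 * lazily creating the async processing manager on first use.
 */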
ib_api_status_t
mlnx_set_cb(
	IN				mlnx_hca_t				*	p_hca, 
	IN				ci_completion_cb_t			comp_cb_p,
	IN				ci_async_event_cb_t			async_cb_p,
	IN		const	void* const					ib_context)
{
	cl_status_t		cl_status;

	// Setup the callbacks
	if (!p_hca->async_proc_mgr_p)
	{
		p_hca->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
		if( !p_hca->async_proc_mgr_p )
		{
			return IB_INSUFFICIENT_MEMORY;
		}
		cl_async_proc_construct( p_hca->async_proc_mgr_p );
		cl_status = cl_async_proc_init( p_hca->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
		if( cl_status != CL_SUCCESS )
		{
			cl_async_proc_destroy( p_hca->async_proc_mgr_p );
			cl_free(p_hca->async_proc_mgr_p);
			p_hca->async_proc_mgr_p = NULL;
			return IB_INSUFFICIENT_RESOURCES;
		}
	}

	p_hca->comp_cb_p	= comp_cb_p;
	p_hca->async_cb_p = async_cb_p;
	p_hca->ca_context = ib_context; // This is the context our CB forwards to IBAL
	return IB_SUCCESS;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
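/*
 * Clear the IBAL callbacks; the async processing manager is detached
 * under hca_lock and destroyed outside of it.
 */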
void
mlnx_reset_cb(
	IN				mlnx_hca_t				*	p_hca)
{
	cl_async_proc_t	*p_async_proc;

	cl_spinlock_acquire( &hca_lock );

	p_async_proc = p_hca->async_proc_mgr_p;
	p_hca->async_proc_mgr_p = NULL;

	p_hca->comp_cb_p = NULL;
	p_hca->async_cb_p = NULL;
	p_hca->ca_context = NULL;
	p_hca->cl_device_h = NULL;

	cl_spinlock_release( &hca_lock );

	if( p_async_proc )
	{
		cl_async_proc_destroy( p_async_proc );
		cl_free( p_async_proc );
	}
}

/////////////////////////////////////////////////////////
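/*
 * Translate the low-level port capability mask into the boolean
 * fields of the IBAL ib_port_cap_t.
 */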
void
from_port_cap(
	IN				u32						mthca_port_cap,
		OUT			ib_port_cap_t				*ibal_port_cap_p)
{
#define SET_CAP(flag,cap)	if (mthca_port_cap & flag) ibal_port_cap_p->cap = TRUE

	SET_CAP(IB_PORT_CM_SUP,cm);
	SET_CAP(IB_PORT_SNMP_TUNNEL_SUP,snmp);
	SET_CAP(IB_PORT_DEVICE_MGMT_SUP,dev_mgmt);
	SET_CAP(IB_PORT_VENDOR_CLASS_SUP,vend);
	SET_CAP(IB_PORT_SM_DISABLED,sm_disable);
	SET_CAP(IB_PORT_SM,sm);
	SET_CAP(IB_PORT_NOTICE_SUP,notice);
	SET_CAP(IB_PORT_TRAP_SUP,trap);
	SET_CAP(IB_PORT_AUTO_MIGR_SUP,apm);
	SET_CAP(IB_PORT_SL_MAP_SUP,slmap);
	SET_CAP(IB_PORT_LED_INFO_SUP,ledinfo);
	SET_CAP(IB_PORT_CAP_MASK_NOTICE_SUP,capm_notice);
	SET_CAP(IB_PORT_CLIENT_REG_SUP,client_reregister);
	SET_CAP(IB_PORT_SYS_IMAGE_GUID_SUP,sysguid);
	SET_CAP(IB_PORT_BOOT_MGMT_SUP,boot_mgmt);
	SET_CAP(IB_PORT_DR_NOTICE_SUP,dr_notice);
	SET_CAP(IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP,pkey_switch_ext_port);
	SET_CAP(IB_PORT_LINK_LATENCY_SUP,link_rtl);
	SET_CAP(IB_PORT_REINIT_SUP,reinit);
	SET_CAP(IB_PORT_OPT_IPD_SUP,ipd);
	SET_CAP(IB_PORT_MKEY_NVRAM,mkey_nvram);
	SET_CAP(IB_PORT_PKEY_NVRAM,pkey_nvram);
	// there are no MTHCA flags for the qkey_ctr, pkey_ctr, port_active and bm IBAL capabilities
}


/////////////////////////////////////////////////////////
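/*
 * Fill an IBAL ib_ca_attr_t from the low-level device and port
 * attributes, including the per-port capabilities.
 */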
void
from_hca_cap(
	IN				struct ib_device *ib_dev,
	IN				struct ib_device_attr *hca_info_p,
	IN				struct ib_port_attr  *hca_ports,
	OUT			ib_ca_attr_t				*ca_attr_p)
{
	uint8_t			port_num;
	ib_port_attr_t	*ibal_port_p;
	struct ib_port_attr  *mthca_port_p;

	ca_attr_p->vend_id  = hca_info_p->vendor_id;
	ca_attr_p->dev_id   = (uint16_t)hca_info_p->vendor_part_id;
	ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;
	ca_attr_p->fw_ver = hca_info_p->fw_ver;
	ca_attr_p->ca_guid   = *(UNALIGNED64 uint64_t *)&ib_dev->node_guid;
	ca_attr_p->num_ports = ib_dev->phys_port_cnt;
	ca_attr_p->max_qps   = hca_info_p->max_qp;
	ca_attr_p->max_wrs   = hca_info_p->max_qp_wr;
	ca_attr_p->max_sges   = hca_info_p->max_sge;
	ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;
	ca_attr_p->max_cqs    = hca_info_p->max_cq;
	ca_attr_p->max_cqes  = hca_info_p->max_cqe;
	ca_attr_p->max_pds    = hca_info_p->max_pd;
	ca_attr_p->init_regions = hca_info_p->max_mr;
	ca_attr_p->init_windows = hca_info_p->max_mw;
	ca_attr_p->init_region_size = hca_info_p->max_mr_size;
	ca_attr_p->max_addr_handles = hca_info_p->max_ah;
	ca_attr_p->atomicity     = hca_info_p->atomic_cap;
	ca_attr_p->max_partitions = hca_info_p->max_pkeys;
	ca_attr_p->max_qp_resp_res = (uint8_t)hca_info_p->max_qp_rd_atom;
	ca_attr_p->max_resp_res    = (uint8_t)hca_info_p->max_res_rd_atom;
	ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;
	ca_attr_p->max_ipv6_qps    = hca_info_p->max_raw_ipv6_qp;
	ca_attr_p->max_ether_qps   = hca_info_p->max_raw_ethy_qp;
	ca_attr_p->max_mcast_grps  = hca_info_p->max_mcast_grp;
	ca_attr_p->max_mcast_qps   = hca_info_p->max_total_mcast_qp_attach;
	ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;
	ca_attr_p->max_fmr   = hca_info_p->max_fmr;
	ca_attr_p->max_map_per_fmr   = hca_info_p->max_map_per_fmr;
	ca_attr_p->max_srq = hca_info_p->max_srq;
	ca_attr_p->max_srq_wrs = hca_info_p->max_srq_wr;
	ca_attr_p->max_srq_sges = hca_info_p->max_srq_sge;
	ca_attr_p->system_image_guid = hca_info_p->sys_image_guid;

	ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;
	ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;
	ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;
	ca_attr_p->raw_mcast_support    = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;
	ca_attr_p->apm_support          = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;
	ca_attr_p->av_port_check        = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;
	ca_attr_p->change_primary_port  = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;
	ca_attr_p->modify_wr_depth      = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;
	ca_attr_p->modify_srq_depth     = hca_info_p->device_cap_flags & IB_DEVICE_SRQ_RESIZE;
	ca_attr_p->system_image_guid_support = hca_info_p->device_cap_flags & IB_DEVICE_SYS_IMAGE_GUID;
	ca_attr_p->hw_agents            = FALSE; // in the context of IBAL, the agent is implemented on the host

	ca_attr_p->num_page_sizes = 1;
	ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap

	for (port_num = 0; port_num <= (end_port(ib_dev) - start_port(ib_dev)); ++port_num)
	{
		// Setup port pointers
		ibal_port_p = &ca_attr_p->p_port_attr[port_num];
		mthca_port_p = &hca_ports[port_num];

		// Port Capabilities
		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
		from_port_cap(mthca_port_p->port_cap_flags, &ibal_port_p->cap);

		// Port Attributes
		ibal_port_p->port_num   = (u8)(port_num + start_port(ib_dev));
		ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;
		ibal_port_p->lid        = cl_ntoh16(mthca_port_p->lid);
		ibal_port_p->lmc        = mthca_port_p->lmc;
		ibal_port_p->max_vls    = mthca_port_p->max_vl_num;
		ibal_port_p->sm_lid     = cl_ntoh16(mthca_port_p->sm_lid);
		ibal_port_p->sm_sl      = mthca_port_p->sm_sl;
		ibal_port_p->link_state = (mthca_port_p->state != 0) ? (uint8_t)mthca_port_p->state : IB_LINK_DOWN;
		ibal_port_p->num_gids   = (uint16_t)mthca_port_p->gid_tbl_len;
		ibal_port_p->num_pkeys  = mthca_port_p->pkey_tbl_len;
		ibal_port_p->pkey_ctr   = (uint16_t)mthca_port_p->bad_pkey_cntr;
		ibal_port_p->qkey_ctr   = (uint16_t)mthca_port_p->qkey_viol_cntr;
		ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz;
		ibal_port_p->mtu = (uint8_t)mthca_port_p->max_mtu;
		ibal_port_p->active_speed = mthca_port_p->active_speed;
		ibal_port_p->phys_state = mthca_port_p->phys_state;

		ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;
		// ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",
			ibal_port_p->port_num, cl_ntoh64(ibal_port_p->port_guid)));
	}
}

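/* CQ completion handler: forwards completions to the IBAL completion callback. */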
void cq_comp_handler(struct ib_cq *cq, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	struct ib_cq *p_ib_cq = (struct ib_cq *)cq;
	HCA_ENTER(HCA_DBG_CQ);
	if (p_hca && p_hca->comp_cb_p) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
		(p_hca->comp_cb_p)(p_ib_cq->x.ctx);
	}
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));
	}
	HCA_EXIT(HCA_DBG_CQ);
}

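/*
 * CA async-event handler: builds an ib_event_rec_t and forwards it to the
 * IBAL async callback; unknown event types are mapped to IB_AE_LOCAL_FATAL.
 */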
void ca_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	ib_event_rec_t event_rec;

	// prepare parameters
	event_rec.context = (void *)p_hca->ca_context;
	event_rec.trap.info.port_num = ev->element.port_num;
	event_rec.type = ev->event;
	if (event_rec.type > IB_AE_UNKNOWN) {
		// CL_ASSERT(0); // This shouldn't happen
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n", 
			event_rec.type, IB_AE_LOCAL_FATAL));
		event_rec.type = IB_AE_LOCAL_FATAL;
	}

	// call the user callback
	if (p_hca && p_hca->async_cb_p)
		(p_hca->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

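/* SRQ async-event handler: forwards the event to the IBAL async callback. */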
void srq_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	ib_event_rec_t event_rec;
	struct ib_srq *p_srq;

	// prepare parameters
	event_rec.type = ev->event;
	// TODO: who fills x.vendor_specific
	event_rec.vendor_specific = ev->x.vendor_specific;
	p_srq = (struct ib_srq *)ev->element.srq;
	event_rec.context = p_srq->x.ctx;

	// call the user callback
	if (p_hca && p_hca->async_cb_p)
		(p_hca->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}
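/* QP async-event handler: forwards the event to the IBAL async callback. */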
void qp_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	ib_event_rec_t event_rec;
	struct ib_qp *p_ib_qp;

	// prepare parameters
	event_rec.type = ev->event;
	event_rec.vendor_specific = ev->x.vendor_specific;
	p_ib_qp = (struct ib_qp *)ev->element.qp;
	event_rec.context = p_ib_qp->x.ctx;

	// call the user callback
	if (p_hca && p_hca->async_cb_p)
		(p_hca->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

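/* CQ async-event handler: forwards the event to the IBAL async callback. */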
void cq_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	ib_event_rec_t event_rec;
	struct ib_cq *p_ib_cq;

	// prepare parameters
	event_rec.type = ev->event;
	p_ib_cq = (struct ib_cq *)ev->element.cq;
	event_rec.context = p_ib_cq->x.ctx;

	// call the user callback
	if (p_hca && p_hca->async_cb_p)
		(p_hca->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

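/*
 * Convert between IBAL path-record static rates and the low-level ib_rate
 * encoding; unknown values fall back to IB_RATE_PORT_CURRENT / 0.
 */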
enum ib_rate to_rate(uint8_t rate)
{
	if (rate == IB_PATH_RECORD_RATE_2_5_GBS) return IB_RATE_2_5_GBPS;
	if (rate == IB_PATH_RECORD_RATE_5_GBS) return IB_RATE_5_GBPS;
	if (rate == IB_PATH_RECORD_RATE_10_GBS) return IB_RATE_10_GBPS;
	if (rate == IB_PATH_RECORD_RATE_20_GBS) return IB_RATE_20_GBPS;
	if (rate == IB_PATH_RECORD_RATE_30_GBS) return IB_RATE_30_GBPS;
	if (rate == IB_PATH_RECORD_RATE_40_GBS) return IB_RATE_40_GBPS;
	if (rate == IB_PATH_RECORD_RATE_60_GBS) return IB_RATE_60_GBPS;
	if (rate == IB_PATH_RECORD_RATE_80_GBS) return IB_RATE_80_GBPS;
	if (rate == IB_PATH_RECORD_RATE_120_GBS) return IB_RATE_120_GBPS;
	return IB_RATE_PORT_CURRENT;
}

uint8_t from_rate(enum ib_rate ib_rate)
{
	if (ib_rate == IB_RATE_2_5_GBPS) return IB_PATH_RECORD_RATE_2_5_GBS;
	if (ib_rate == IB_RATE_5_GBPS) return IB_PATH_RECORD_RATE_5_GBS;
	if (ib_rate == IB_RATE_10_GBPS) return IB_PATH_RECORD_RATE_10_GBS;
	if (ib_rate == IB_RATE_20_GBPS) return IB_PATH_RECORD_RATE_20_GBS;
	if (ib_rate == IB_RATE_30_GBPS) return IB_PATH_RECORD_RATE_30_GBS;
	if (ib_rate == IB_RATE_40_GBPS) return IB_PATH_RECORD_RATE_40_GBS;
	if (ib_rate == IB_RATE_60_GBPS) return IB_PATH_RECORD_RATE_60_GBS;
	if (ib_rate == IB_RATE_80_GBPS) return IB_PATH_RECORD_RATE_80_GBS;
	if (ib_rate == IB_RATE_120_GBPS) return IB_PATH_RECORD_RATE_120_GBS;
	return 0;
}

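/*
 * Convert an IBAL address vector (ib_av_attr_t) to a low-level ib_ah_attr,
 * resolving the source GID index from the cached GID table when a GRH is present.
 */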
int
to_av(
	IN		const	struct ib_device	*p_ib_dev,
	IN		const	ib_av_attr_t		*p_ib_av_attr,
	OUT			struct ib_ah_attr		*p_ib_ah_attr)
{
	int err = 0;
	u8 port_num;
	u16 gid_index;

	p_ib_ah_attr->port_num = p_ib_av_attr->port_num;
	p_ib_ah_attr->sl   = p_ib_av_attr->sl;
	p_ib_ah_attr->dlid = cl_ntoh16(p_ib_av_attr->dlid);
	p_ib_ah_attr->static_rate = to_rate(p_ib_av_attr->static_rate);
	p_ib_ah_attr->src_path_bits = p_ib_av_attr->path_bits; // PATH:

	/* For a global destination or multicast address: */
	if (p_ib_av_attr->grh_valid) {
		p_ib_ah_attr->ah_flags |= IB_AH_GRH;
		p_ib_ah_attr->grh.hop_limit     = p_ib_av_attr->grh.hop_limit;
		ib_grh_get_ver_class_flow( p_ib_av_attr->grh.ver_class_flow, NULL,
			&p_ib_ah_attr->grh.traffic_class, &p_ib_ah_attr->grh.flow_label );
		err = p_ib_dev->x.find_cached_gid((struct ib_device *)p_ib_dev, 
			(union ib_gid	*)p_ib_av_attr->grh.src_gid.raw, &port_num, &gid_index);
		if (err) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("ib_find_cached_gid failed %d (%#x). Using default:  sgid_index = 0\n", err, err));
			gid_index = 0;
		}
		else if (port_num != p_ib_ah_attr->port_num) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("ib_find_cached_gid returned wrong port_num %u (Expected - %u). Using the expected.\n", 
				(u32)port_num, (u32)p_ib_ah_attr->port_num));
		}
		p_ib_ah_attr->grh.sgid_index = (u8)gid_index;
		RtlCopyMemory(p_ib_ah_attr->grh.dgid.raw, 
			p_ib_av_attr->grh.dest_gid.raw, sizeof(p_ib_ah_attr->grh.dgid));
	}
	else
		p_ib_ah_attr->ah_flags = 0;

	return err;
}

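/*
 * Convert a low-level ib_ah_attr back to an IBAL address vector, optionally
 * filling the connection-related fields from the QP attributes.
 */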
int from_av(
	IN		const	struct ib_device	*p_ib_dev,
	IN			struct ib_qp_attr		*p_ib_qp_attr,
	IN			struct ib_ah_attr		*p_ib_ah_attr,
	OUT			ib_av_attr_t			*p_ib_av_attr)
{
	int err = 0;

	p_ib_av_attr->port_num		= p_ib_ah_attr->port_num;
	p_ib_av_attr->sl			= p_ib_ah_attr->sl;
	p_ib_av_attr->dlid			= cl_hton16(p_ib_ah_attr->dlid);
	p_ib_av_attr->static_rate	= from_rate(p_ib_ah_attr->static_rate);
	p_ib_av_attr->path_bits		= p_ib_ah_attr->src_path_bits;

	if (p_ib_qp_attr) {
		p_ib_av_attr->conn.path_mtu				= p_ib_qp_attr->path_mtu;
		p_ib_av_attr->conn.local_ack_timeout	= p_ib_qp_attr->timeout;
		p_ib_av_attr->conn.seq_err_retry_cnt	= p_ib_qp_attr->retry_cnt;
		p_ib_av_attr->conn.rnr_retry_cnt		= p_ib_qp_attr->rnr_retry;
	}

	if (p_ib_ah_attr->ah_flags & IB_AH_GRH) {
		p_ib_av_attr->grh_valid = TRUE;
		p_ib_av_attr->grh.hop_limit = p_ib_ah_attr->grh.hop_limit;
		p_ib_av_attr->grh.ver_class_flow = ib_grh_set_ver_class_flow(
			0, p_ib_ah_attr->grh.traffic_class, p_ib_ah_attr->grh.flow_label );
		RtlCopyMemory(p_ib_av_attr->grh.dest_gid.raw, 
			p_ib_ah_attr->grh.dgid.raw, sizeof(p_ib_av_attr->grh.dest_gid));
		err = p_ib_dev->x.get_cached_gid((struct ib_device *)p_ib_dev, 
			p_ib_ah_attr->port_num, p_ib_ah_attr->grh.sgid_index,
			(union ib_gid*)p_ib_av_attr->grh.src_gid.raw );
		if (err) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("ib_get_cached_gid failed %d (%#x). Using default:  sgid_index = 0\n", err, err));
		}
	}
	else
		p_ib_av_attr->grh_valid = FALSE;

	return err;
}

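/*
 * Convert between IBAL access rights (ib_access_t) and the low-level
 * ib_access_flags bit masks.
 */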
enum ib_access_flags
to_qp_acl(
	IN				ib_access_t					ibal_acl)
{
#define IBAL_ACL(ifl,mfl) if (ibal_acl & ifl) acc |= mfl
	enum ib_access_flags acc = 0;

	IBAL_ACL(IB_AC_RDMA_READ,IB_ACCESS_REMOTE_READ);
	IBAL_ACL(IB_AC_RDMA_WRITE,IB_ACCESS_REMOTE_WRITE);
	IBAL_ACL(IB_AC_ATOMIC,IB_ACCESS_REMOTE_ATOMIC);
	IBAL_ACL(IB_AC_LOCAL_WRITE,IB_ACCESS_LOCAL_WRITE);
	IBAL_ACL(IB_AC_MW_BIND,IB_ACCESS_MW_BIND);

	return acc;
}

ib_access_t
from_qp_acl(
	IN		enum ib_access_flags			acc)
{
#define IB_ACL(ifl,mfl) if (acc & ifl) ibal_acl |= mfl
	ib_access_t ibal_acl = 0;

	IB_ACL(IB_ACCESS_REMOTE_READ,IB_AC_RDMA_READ);
	IB_ACL(IB_ACCESS_REMOTE_WRITE,IB_AC_RDMA_WRITE);
	IB_ACL(IB_ACCESS_REMOTE_ATOMIC,IB_AC_ATOMIC);
	IB_ACL(IB_ACCESS_LOCAL_WRITE,IB_AC_LOCAL_WRITE);
	IB_ACL(IB_ACCESS_MW_BIND,IB_AC_MW_BIND);

	return ibal_acl;
}

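/*
 * Convert between IBAL and low-level QP states. XIB_QPS_SQD maps back to
 * IB_QPS_SQD or IB_QPS_SQD_DRAINING, depending on the 'draining' flag.
 */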
static enum ib_qp_state to_qp_state(ib_qp_state_t ib_qps)
{
#define MAP_XIB_QPS(val1,val2) case val1: qps = val2; break
	enum ib_qp_state qps;
	switch (ib_qps) {
		MAP_XIB_QPS( IB_QPS_RESET, XIB_QPS_RESET );
		MAP_XIB_QPS( IB_QPS_INIT, XIB_QPS_INIT );
		MAP_XIB_QPS( IB_QPS_RTR, XIB_QPS_RTR );
		MAP_XIB_QPS( IB_QPS_RTS, XIB_QPS_RTS );
		MAP_XIB_QPS( IB_QPS_SQD, XIB_QPS_SQD );
		MAP_XIB_QPS( IB_QPS_SQD_DRAINING, XIB_QPS_SQD );
		MAP_XIB_QPS( IB_QPS_SQD_DRAINED, XIB_QPS_SQD );
		MAP_XIB_QPS( IB_QPS_SQERR, XIB_QPS_SQE );
		MAP_XIB_QPS( IB_QPS_ERROR, XIB_QPS_ERR );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped IBAL qp_state %d\n", ib_qps));
			qps = 0xffffffff;
	}
	return qps;
}

static ib_qp_state_t  from_qp_state(enum ib_qp_state qps, int draining)
{
#define MAP_IB_QPS(val1,val2) case val1: ib_qps = val2; break
	ib_qp_state_t ib_qps;

	if (qps == XIB_QPS_SQD) {
		ib_qps = draining ? IB_QPS_SQD_DRAINING : IB_QPS_SQD;
		return ib_qps;
	}

	switch (qps) {
		MAP_IB_QPS( XIB_QPS_RESET, IB_QPS_RESET );
		MAP_IB_QPS( XIB_QPS_INIT, IB_QPS_INIT );
		MAP_IB_QPS( XIB_QPS_RTR, IB_QPS_RTR );
		MAP_IB_QPS( XIB_QPS_RTS, IB_QPS_RTS );
		MAP_IB_QPS( XIB_QPS_SQE, IB_QPS_SQERR );
		MAP_IB_QPS( XIB_QPS_ERR, IB_QPS_ERROR );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped low-level qp_state %d\n", qps));
			ib_qps = 0xffffffff;
	}
	return ib_qps;
}

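/*
 * Build a low-level ib_qp_attr and attribute mask from an IBAL modify-QP
 * request, according to the requested state transition and the QP type.
 */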
ib_api_status_t
to_qp_attr(
	IN	 const	struct ib_qp *p_ib_qp,
	IN				ib_qp_type_t	qp_type,
	IN	 const	ib_qp_mod_t *p_ib_qp_mod,
	OUT	struct ib_qp_attr *p_ib_qp_attr,
	OUT	int *p_qp_attr_mask
	)
{
	int err;
	ib_api_status_t		status = IB_SUCCESS;
	struct mlx4_ib_qp *p_mib_qp = (struct mlx4_ib_qp *)p_ib_qp;

	RtlZeroMemory( p_ib_qp_attr, sizeof *p_ib_qp_attr );
	*p_qp_attr_mask = IB_QP_STATE;
	p_ib_qp_attr->qp_state = to_qp_state( p_ib_qp_mod->req_state );

	// skipped cases
	if (p_mib_qp->state == XIB_QPS_RESET && p_ib_qp_mod->req_state != IB_QPS_INIT)
		return IB_NOT_DONE;

	switch (p_ib_qp_mod->req_state) {
	case IB_QPS_RESET:
	case IB_QPS_ERROR:
	case IB_QPS_SQERR:
	case IB_QPS_TIME_WAIT:
		break;

	case IB_QPS_INIT:

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
			case IB_QPT_UNRELIABLE_CONN:
				*p_qp_attr_mask |= IB_QP_PORT | IB_QP_PKEY_INDEX |IB_QP_ACCESS_FLAGS;
				p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.init.access_ctrl);
				break;
			case IB_QPT_QP0:
			case IB_QPT_QP1:
				// TODO: these cases had IB_QP_PORT in mthca
				// TODO: they do not pass the ib_modify_qp_is_ok check here
				*p_qp_attr_mask |= IB_QP_QKEY | IB_QP_PKEY_INDEX ;
				p_ib_qp_attr->qkey	= cl_ntoh32 (p_ib_qp_mod->state.init.qkey);
				break;
			case IB_QPT_UNRELIABLE_DGRM:
			default:
				*p_qp_attr_mask |= IB_QP_PORT | IB_QP_QKEY | IB_QP_PKEY_INDEX ;
				p_ib_qp_attr->qkey	= cl_ntoh32 (p_ib_qp_mod->state.init.qkey);
				break;
		}

		// IB_QP_PORT
		p_ib_qp_attr->port_num    = p_ib_qp_mod->state.init.primary_port;

		// IB_QP_PKEY_INDEX
		p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.init.pkey_index;

		break;

	case IB_QPS_RTR:
		/* modifying the WQE depth is not supported */
		if( p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||
			p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )	{
			status = IB_UNSUPPORTED;
			break;
		}

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
				*p_qp_attr_mask |= /* required flags */
					IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |
					IB_QP_AV |IB_QP_PATH_MTU | IB_QP_MIN_RNR_TIMER;

				// IB_QP_DEST_QPN
				p_ib_qp_attr->dest_qp_num		= cl_ntoh32 (p_ib_qp_mod->state.rtr.dest_qp);

				// IB_QP_RQ_PSN
				p_ib_qp_attr->rq_psn			= cl_ntoh32 (p_ib_qp_mod->state.rtr.rq_psn);

				// IB_QP_MAX_DEST_RD_ATOMIC
				p_ib_qp_attr->max_dest_rd_atomic	= p_ib_qp_mod->state.rtr.resp_res;

				// IB_QP_AV, IB_QP_PATH_MTU: Convert primary RC AV (mandatory)
				err = to_av(p_ib_qp->device,
					&p_ib_qp_mod->state.rtr.primary_av, &p_ib_qp_attr->ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}
				p_ib_qp_attr->path_mtu		= p_ib_qp_mod->state.rtr.primary_av.conn.path_mtu;
				p_ib_qp_attr->timeout		= p_ib_qp_mod->state.rtr.primary_av.conn.local_ack_timeout;
				p_ib_qp_attr->retry_cnt		= p_ib_qp_mod->state.rtr.primary_av.conn.seq_err_retry_cnt;
				p_ib_qp_attr->rnr_retry		= p_ib_qp_mod->state.rtr.primary_av.conn.rnr_retry_cnt;

				// IB_QP_MIN_RNR_TIMER, required in RTR, optional in RTS.
				p_ib_qp_attr->min_rnr_timer	= p_ib_qp_mod->state.rtr.rnr_nak_timeout;

				// IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
					*p_qp_attr_mask |= IB_QP_ACCESS_FLAGS;		/* optional flag */
					p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.rtr.access_ctrl);
				}

				// IB_QP_ALT_PATH: Convert alternate RC AV
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
					*p_qp_attr_mask |= IB_QP_ALT_PATH;	/* required flag */
					err = to_av(p_ib_qp->device,
						&p_ib_qp_mod->state.rtr.alternate_av, &p_ib_qp_attr->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
					p_ib_qp_attr->alt_timeout	= p_ib_qp_mod->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
					p_ib_qp_attr->alt_port_num	= p_ib_qp_mod->state.rtr.alternate_av.port_num;
				}

				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rtr.pkey_index;
				}
				break;

			case IB_QPT_UNRELIABLE_CONN:
				*p_qp_attr_mask |= /* required flags */
					IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_AV | IB_QP_PATH_MTU;

				// IB_QP_DEST_QPN
				p_ib_qp_attr->dest_qp_num		= cl_ntoh32 (p_ib_qp_mod->state.rtr.dest_qp);

				// IB_QP_RQ_PSN
				p_ib_qp_attr->rq_psn			= cl_ntoh32 (p_ib_qp_mod->state.rtr.rq_psn);

				// IB_QP_PATH_MTU
				p_ib_qp_attr->path_mtu		= p_ib_qp_mod->state.rtr.primary_av.conn.path_mtu;

				// IB_QP_AV: Convert primary AV (mandatory)
				err = to_av(p_ib_qp->device,
					&p_ib_qp_mod->state.rtr.primary_av, &p_ib_qp_attr->ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}

				// IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
					*p_qp_attr_mask |= IB_QP_ACCESS_FLAGS;		/* optional flag */
					p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.rtr.access_ctrl);
				}

				// IB_QP_ALT_PATH: Convert alternate RC AV
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
					*p_qp_attr_mask |= IB_QP_ALT_PATH;	/* required flag */
					err = to_av(p_ib_qp->device,
						&p_ib_qp_mod->state.rtr.alternate_av, &p_ib_qp_attr->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
					p_ib_qp_attr->alt_timeout	= p_ib_qp_mod->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
					p_ib_qp_attr->alt_port_num	= p_ib_qp_mod->state.rtr.alternate_av.port_num;
				}

				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rtr.pkey_index;
				}
				break;

			case IB_QPT_UNRELIABLE_DGRM:
			case IB_QPT_QP0:
			case IB_QPT_QP1:
			default:
				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rtr.pkey_index;
				}

				// IB_QP_QKEY
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_QKEY) {
					*p_qp_attr_mask |= IB_QP_QKEY;
					p_ib_qp_attr->qkey	= cl_ntoh32 (p_ib_qp_mod->state.rtr.qkey);
				}
				break;
		}
		break;

	case IB_QPS_RTS:
		/* modifying the WQE depth is not supported */
		if( p_ib_qp_mod->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
			p_ib_qp_mod->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
		{
			status = IB_UNSUPPORTED;
			break;
		}

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
				if (p_mib_qp->state != XIB_QPS_RTS)
					*p_qp_attr_mask |= /* required flags */
						IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC | IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |IB_QP_RNR_RETRY;

				// IB_QP_MAX_QP_RD_ATOMIC
				p_ib_qp_attr->max_rd_atomic	= p_ib_qp_mod->state.rts.init_depth;

				// IB_QP_TIMEOUT
				p_ib_qp_attr->timeout		= p_ib_qp_mod->state.rts.local_ack_timeout; // XXX: conv

				// IB_QP_RETRY_CNT
				p_ib_qp_attr->retry_cnt = p_ib_qp_mod->state.rts.retry_cnt;

				// IB_QP_RNR_RETRY
				p_ib_qp_attr->rnr_retry	= p_ib_qp_mod->state.rts.rnr_retry_cnt;

				// IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_RESP_RES) {
					*p_qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC;
					p_ib_qp_attr->max_dest_rd_atomic = p_ib_qp_mod->state.rts.resp_res;
				}

#ifdef WIN_TO_BE_REMOVED
		//TODO: do we need this?
		// Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rts.pkey_index;
				}
#endif

				// IB_QP_MIN_RNR_TIMER
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {
					*p_qp_attr_mask |= IB_QP_MIN_RNR_TIMER;
					p_ib_qp_attr->min_rnr_timer	= p_ib_qp_mod->state.rts.rnr_nak_timeout;
				}

				// IB_QP_PATH_MIG_STATE
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_APM_STATE) {
					*p_qp_attr_mask |= IB_QP_PATH_MIG_STATE;
					p_ib_qp_attr->path_mig_state = p_ib_qp_mod->state.rts.apm_state;
				}

				// IB_QP_ACCESS_FLAGS
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
					*p_qp_attr_mask |= IB_QP_ACCESS_FLAGS;		/* optional flags */
					p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.rts.access_ctrl);
				}

				// IB_QP_ALT_PATH: Convert alternate RC AV
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
					*p_qp_attr_mask |= IB_QP_ALT_PATH;	/* optional flag */
					err = to_av(p_ib_qp->device,
						&p_ib_qp_mod->state.rts.alternate_av, &p_ib_qp_attr->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
					p_ib_qp_attr->alt_timeout	= p_ib_qp_mod->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv
				}
				break;

			case IB_QPT_UNRELIABLE_CONN:
				if (p_mib_qp->state != XIB_QPS_RTS)
					*p_qp_attr_mask |= /* required flags */
						IB_QP_SQ_PSN;

				// IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_RESP_RES) {
					*p_qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC;
					p_ib_qp_attr->max_dest_rd_atomic = p_ib_qp_mod->state.rts.resp_res;
				}

#ifdef WIN_TO_BE_REMOVED
		//TODO: do we need this?
		// Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rts.pkey_index;
				}
#endif

				// IB_QP_PATH_MIG_STATE
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_APM_STATE) {
					*p_qp_attr_mask |= IB_QP_PATH_MIG_STATE;
					p_ib_qp_attr->path_mig_state = p_ib_qp_mod->state.rts.apm_state;
				}

				// IB_QP_ACCESS_FLAGS
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
					*p_qp_attr_mask |= IB_QP_ACCESS_FLAGS;		/* optional flags */
					p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.rts.access_ctrl);
				}

				// IB_QP_ALT_PATH: Convert alternate RC AV
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
					*p_qp_attr_mask |= IB_QP_ALT_PATH;	/* optional flag */
					err = to_av(p_ib_qp->device,
						&p_ib_qp_mod->state.rts.alternate_av, &p_ib_qp_attr->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
				}
				break;

			case IB_QPT_UNRELIABLE_DGRM:
			case IB_QPT_QP0:
			case IB_QPT_QP1:
			default:
				if (p_mib_qp->state != XIB_QPS_RTS)
					*p_qp_attr_mask |= /* required flags */
						IB_QP_SQ_PSN;

				// IB_QP_QKEY
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_QKEY) {
					*p_qp_attr_mask |= IB_QP_QKEY;
					p_ib_qp_attr->qkey	= cl_ntoh32 (p_ib_qp_mod->state.rts.qkey);
				}
				break;
		}

		// IB_QP_SQ_PSN: common for all
		p_ib_qp_attr->sq_psn = cl_ntoh32 (p_ib_qp_mod->state.rts.sq_psn);
		//NB: the IB_QP_CUR_STATE flag is not provisioned by IBAL
		break;

	case IB_QPS_SQD:
	case IB_QPS_SQD_DRAINING:
	case IB_QPS_SQD_DRAINED:
		*p_qp_attr_mask |= IB_QP_EN_SQD_ASYNC_NOTIFY;
		p_ib_qp_attr->en_sqd_async_notify = (u8)p_ib_qp_mod->state.sqd.sqd_event;
		HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM ,("IB_QP_EN_SQD_ASYNC_NOTIFY appears to be unsupported\n"));
		break;

	default:
		//NB: is this an error case that warrants this message? Should we return an error here?
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped qp_state %d\n", p_ib_qp_mod->req_state));
		break;
	}

	return status;
}

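/*
 * Convert between IBAL and low-level QP transport types
 * (e.g. IB_QPT_RELIABLE_CONN <-> IB_QPT_RC, IB_QPT_QP0 <-> IB_QPT_SMI).
 */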
enum ib_qp_type to_qp_type(ib_qp_type_t qp_type)
{
#define MAP_TYPE(val1,val2) case val1: ib_qp_type = val2; break
	enum ib_qp_type ib_qp_type;

	switch (qp_type) {
		MAP_TYPE( IB_QPT_RELIABLE_CONN, IB_QPT_RC );
		MAP_TYPE( IB_QPT_UNRELIABLE_CONN, IB_QPT_UC );
		MAP_TYPE( IB_QPT_UNRELIABLE_DGRM, IB_QPT_UD );
		MAP_TYPE( IB_QPT_QP0, IB_QPT_SMI );
		MAP_TYPE( IB_QPT_QP1, IB_QPT_GSI );
		MAP_TYPE( IB_QPT_RAW_IPV6, IB_QPT_RAW_IP_V6 );
		MAP_TYPE( IB_QPT_RAW_ETHER, IB_QPT_RAW_ETY );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("Unmapped IBAL qp_type %d\n", qp_type));
			ib_qp_type = 0xffffffff;
	}
	return ib_qp_type;
}

ib_qp_type_t  from_qp_type(enum ib_qp_type ib_qp_type)
{
#define MAP_IB_TYPE(val1,val2) case val1: qp_type = val2; break
	ib_qp_type_t qp_type;

	switch (ib_qp_type) {
		MAP_IB_TYPE( IB_QPT_RC, IB_QPT_RELIABLE_CONN );
		MAP_IB_TYPE( IB_QPT_UC, IB_QPT_UNRELIABLE_CONN );
		MAP_IB_TYPE( IB_QPT_UD, IB_QPT_UNRELIABLE_DGRM );
		MAP_IB_TYPE( IB_QPT_SMI, IB_QPT_QP0 );
		MAP_IB_TYPE( IB_QPT_GSI, IB_QPT_QP1 );
		MAP_IB_TYPE( IB_QPT_RAW_IP_V6, IB_QPT_RAW_IPV6 );
		MAP_IB_TYPE( IB_QPT_RAW_ETY, IB_QPT_RAW_ETHER );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("Unmapped MLX4 ib_qp_type %d\n", ib_qp_type));
			qp_type = 0xffffffff;
	}
	return qp_type;
}

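/* Convert the low-level path-migration state to the IBAL APM state. */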
ib_apm_state_t from_apm_state(enum ib_mig_state apm)
{
	if (apm == IB_MIG_MIGRATED) return IB_APM_MIGRATED;
	if (apm == IB_MIG_REARM) return IB_APM_REARM;
	if (apm == IB_MIG_ARMED) return IB_APM_ARMED;
	return 0xffffffff;
}

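/*
 * Convert queried low-level QP attributes back to the IBAL ib_qp_attr_t
 * representation, including both the primary and alternate address vectors.
 */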
ib_api_status_t
from_qp_attr(
	IN	 const	struct ib_qp	*p_ib_qp,
	IN	struct ib_qp_attr		*p_ib_qp_attr,
	OUT	ib_qp_attr_t		*p_qp_attr
	)
{
	int err;
	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
	p_qp_attr->h_pd = (ib_pd_handle_t)p_ib_qp->pd;
	p_qp_attr->qp_type = from_qp_type(p_ib_qp->qp_type);
	p_qp_attr->access_ctrl = from_qp_acl(p_ib_qp_attr->qp_access_flags);
	p_qp_attr->pkey_index = p_ib_qp_attr->pkey_index;

	p_qp_attr->sq_max_inline = p_ib_qp_attr->cap.max_inline_data;
	p_qp_attr->sq_depth = p_ib_qp_attr->cap.max_send_wr;
	p_qp_attr->rq_depth = p_ib_qp_attr->cap.max_recv_wr;
	p_qp_attr->sq_sge = p_ib_qp_attr->cap.max_send_sge;
	p_qp_attr->rq_sge = p_ib_qp_attr->cap.max_recv_sge;
	p_qp_attr->init_depth = p_ib_qp_attr->max_rd_atomic;
	p_qp_attr->resp_res = p_ib_qp_attr->max_dest_rd_atomic;

	p_qp_attr->h_sq_cq = (ib_cq_handle_t)p_ib_qp->send_cq;
	p_qp_attr->h_rq_cq = (ib_cq_handle_t)p_ib_qp->recv_cq;
	p_qp_attr->h_srq = (ib_srq_handle_t)p_ib_qp->srq;

	p_qp_attr->sq_signaled = !!((struct mlx4_ib_qp *)p_ib_qp)->sq_signal_bits;

	p_qp_attr->state = from_qp_state( p_ib_qp_attr->qp_state, 
		p_ib_qp_attr->sq_draining);
	p_qp_attr->num = cl_hton32(p_ib_qp->qp_num);
	p_qp_attr->dest_num = cl_hton32(p_ib_qp_attr->dest_qp_num);
	p_qp_attr->qkey = cl_hton32(p_ib_qp_attr->qkey);

	p_qp_attr->sq_psn = cl_hton32(p_ib_qp_attr->sq_psn);
	p_qp_attr->rq_psn = cl_hton32(p_ib_qp_attr->rq_psn);

	p_qp_attr->primary_port = p_ib_qp_attr->port_num;
	p_qp_attr->alternate_port = p_ib_qp_attr->alt_port_num;
	err = from_av( p_ib_qp->device, p_ib_qp_attr, &p_ib_qp_attr->ah_attr, &p_qp_attr->primary_av);
	if (err)
		goto err_av;
	err = from_av( p_ib_qp->device, p_ib_qp_attr, &p_ib_qp_attr->alt_ah_attr, &p_qp_attr->alternate_av);
	if (err)
		goto err_av;
	p_qp_attr->apm_state = from_apm_state(p_ib_qp_attr->path_mig_state);

	return IB_SUCCESS;

err_av:
	return errno_to_iberr(err);
}