/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: data.c 1944 2007-02-12 16:16:00Z sleybo $
 */

#include "precomp.h"

#if defined(EVENT_TRACING)
#ifdef offsetof
#undef offsetof
#endif
#include "data.tmh"
#endif

static cl_spinlock_t	hca_lock;

uint32_t		g_mlnx_dpc2thread = 0;

cl_qlist_t		mlnx_hca_list;

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
void
mlnx_hca_insert(
	IN	mlnx_hca_t	*p_hca )
{
	cl_spinlock_acquire( &hca_lock );
	cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hca_lock );
}

void
mlnx_hca_remove(
	IN	mlnx_hca_t	*p_hca )
{
	cl_spinlock_acquire( &hca_lock );
	cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hca_lock );
}

mlnx_hca_t*
mlnx_hca_from_guid(
	IN	ib_net64_t	guid )
{
	cl_list_item_t	*p_item;
	mlnx_hca_t	*p_hca = NULL;

	cl_spinlock_acquire( &hca_lock );
	p_item = cl_qlist_head( &mlnx_hca_list );
	while( p_item != cl_qlist_end( &mlnx_hca_list ) )
	{
		p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
		if( p_hca->guid == guid )
			break;
		p_item = cl_qlist_next( p_item );
		p_hca = NULL;
	}
	cl_spinlock_release( &hca_lock );
	return p_hca;
}
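
/*
 * Usage sketch (illustrative only, not compiled): look up an HCA by its
 * node GUID and register callbacks on it.  The GUID value and the
 * my_comp_cb/my_async_cb/my_ibal_context names below are hypothetical.
 */
#if 0
	mlnx_hca_t *p_hca = mlnx_hca_from_guid( (ib_net64_t)0x0002c90200004000 );
	if( p_hca )
		mlnx_set_cb( p_hca, my_comp_cb, my_async_cb, my_ibal_context );
#endif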

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
cl_status_t
mlnx_hcas_init( void )
{
	cl_qlist_init( &mlnx_hca_list );
	return cl_spinlock_init( &hca_lock );
}


/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_set_cb(
	IN		mlnx_hca_t		*p_hca,
	IN		ci_completion_cb_t	comp_cb_p,
	IN		ci_async_event_cb_t	async_cb_p,
	IN	const	void* const		ib_context)
{
	cl_status_t	cl_status;

	// Set up the callbacks
	if (!p_hca->async_proc_mgr_p)
	{
		p_hca->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
		if( !p_hca->async_proc_mgr_p )
		{
			return IB_INSUFFICIENT_MEMORY;
		}
		cl_async_proc_construct( p_hca->async_proc_mgr_p );
		cl_status = cl_async_proc_init( p_hca->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
		if( cl_status != CL_SUCCESS )
		{
			cl_async_proc_destroy( p_hca->async_proc_mgr_p );
			cl_free(p_hca->async_proc_mgr_p);
			p_hca->async_proc_mgr_p = NULL;
			return IB_INSUFFICIENT_RESOURCES;
		}
	}

	p_hca->comp_cb_p = comp_cb_p;
	p_hca->async_cb_p = async_cb_p;
	p_hca->ca_context = ib_context; // This is the context our CB forwards to IBAL
	return IB_SUCCESS;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_reset_cb(
	IN	mlnx_hca_t	*p_hca)
{
	cl_async_proc_t *p_async_proc;

	cl_spinlock_acquire( &hca_lock );

	p_async_proc = p_hca->async_proc_mgr_p;
	p_hca->async_proc_mgr_p = NULL;

	p_hca->comp_cb_p = NULL;
	p_hca->async_cb_p = NULL;
	p_hca->ca_context = NULL;
	p_hca->cl_device_h = NULL;

	cl_spinlock_release( &hca_lock );

	if( p_async_proc )
	{
		cl_async_proc_destroy( p_async_proc );
		cl_free( p_async_proc );
	}
}

/////////////////////////////////////////////////////////
void
from_port_cap(
	IN	u32		mthca_port_cap,
	OUT	ib_port_cap_t	*ibal_port_cap_p)
{
#define SET_CAP(flag,cap)	if (mthca_port_cap & flag) ibal_port_cap_p->cap = TRUE

	SET_CAP(IB_PORT_CM_SUP,cm);
	SET_CAP(IB_PORT_SNMP_TUNNEL_SUP,snmp);
	SET_CAP(IB_PORT_DEVICE_MGMT_SUP,dev_mgmt);
	SET_CAP(IB_PORT_VENDOR_CLASS_SUP,vend);
	SET_CAP(IB_PORT_SM_DISABLED,sm_disable);
	SET_CAP(IB_PORT_SM,sm);
	SET_CAP(IB_PORT_NOTICE_SUP,notice);
	SET_CAP(IB_PORT_TRAP_SUP,trap);
	SET_CAP(IB_PORT_AUTO_MIGR_SUP,apm);
	SET_CAP(IB_PORT_SL_MAP_SUP,slmap);
	SET_CAP(IB_PORT_LED_INFO_SUP,ledinfo);
	SET_CAP(IB_PORT_CAP_MASK_NOTICE_SUP,capm_notice);
	SET_CAP(IB_PORT_CLIENT_REG_SUP,client_reregister);
	SET_CAP(IB_PORT_SYS_IMAGE_GUID_SUP,sysguid);
	SET_CAP(IB_PORT_BOOT_MGMT_SUP,boot_mgmt);
	SET_CAP(IB_PORT_DR_NOTICE_SUP,dr_notice);
	SET_CAP(IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP,pkey_switch_ext_port);
	SET_CAP(IB_PORT_LINK_LATENCY_SUP,link_rtl);
	SET_CAP(IB_PORT_REINIT_SUP,reinit);
	SET_CAP(IB_PORT_OPT_IPD_SUP,ipd);
	SET_CAP(IB_PORT_MKEY_NVRAM,mkey_nvram);
	SET_CAP(IB_PORT_PKEY_NVRAM,pkey_nvram);
	// there are no MTHCA flags for the qkey_ctr, pkey_ctr, port_active and bm IBAL capabilities
}
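
/*
 * For reference, each SET_CAP invocation above expands to a simple guarded
 * assignment; e.g. SET_CAP(IB_PORT_CM_SUP,cm) becomes:
 */
#if 0
	if (mthca_port_cap & IB_PORT_CM_SUP) ibal_port_cap_p->cm = TRUE;
#endif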

/////////////////////////////////////////////////////////
void
from_hca_cap(
	IN	struct ib_device	*ib_dev,
	IN	struct ib_device_attr	*hca_info_p,
	IN	struct ib_port_attr	*hca_ports,
	OUT	ib_ca_attr_t		*ca_attr_p)
{
	uint8_t			port_num;
	ib_port_attr_t		*ibal_port_p;
	struct ib_port_attr	*mthca_port_p;

	ca_attr_p->vend_id  = hca_info_p->vendor_id;
	ca_attr_p->dev_id   = (uint16_t)hca_info_p->vendor_part_id;
	ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;
	ca_attr_p->fw_ver = hca_info_p->fw_ver;
	ca_attr_p->ca_guid   = *(UNALIGNED64 uint64_t *)&ib_dev->node_guid;
	ca_attr_p->num_ports = ib_dev->phys_port_cnt;
	ca_attr_p->max_qps   = hca_info_p->max_qp;
	ca_attr_p->max_wrs   = hca_info_p->max_qp_wr;
	ca_attr_p->max_sges   = hca_info_p->max_sge;
	ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;
	ca_attr_p->max_cqs    = hca_info_p->max_cq;
	ca_attr_p->max_cqes  = hca_info_p->max_cqe;
	ca_attr_p->max_pds    = hca_info_p->max_pd;
	ca_attr_p->init_regions = hca_info_p->max_mr;
	ca_attr_p->init_windows = hca_info_p->max_mw;
	ca_attr_p->init_region_size = hca_info_p->max_mr_size;
	ca_attr_p->max_addr_handles = hca_info_p->max_ah;
	ca_attr_p->atomicity     = hca_info_p->atomic_cap;
	ca_attr_p->max_partitions = hca_info_p->max_pkeys;
	ca_attr_p->max_qp_resp_res = (uint8_t)hca_info_p->max_qp_rd_atom;
	ca_attr_p->max_resp_res    = (uint8_t)hca_info_p->max_res_rd_atom;
	ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;
	ca_attr_p->max_ipv6_qps    = hca_info_p->max_raw_ipv6_qp;
	ca_attr_p->max_ether_qps   = hca_info_p->max_raw_ethy_qp;
	ca_attr_p->max_mcast_grps  = hca_info_p->max_mcast_grp;
	ca_attr_p->max_mcast_qps   = hca_info_p->max_total_mcast_qp_attach;
	ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;
	ca_attr_p->max_fmr   = hca_info_p->max_fmr;
	ca_attr_p->max_map_per_fmr   = hca_info_p->max_map_per_fmr;
	ca_attr_p->max_srq = hca_info_p->max_srq;
	ca_attr_p->max_srq_wrs = hca_info_p->max_srq_wr;
	ca_attr_p->max_srq_sges = hca_info_p->max_srq_sge;

	ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;
	ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;
	ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;
	ca_attr_p->raw_mcast_support    = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;
	ca_attr_p->apm_support          = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;
	ca_attr_p->av_port_check        = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;
	ca_attr_p->change_primary_port  = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;
	ca_attr_p->modify_wr_depth      = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;
	ca_attr_p->modify_srq_depth     = hca_info_p->device_cap_flags & IB_DEVICE_SRQ_RESIZE;
	ca_attr_p->hw_agents            = FALSE; // in the context of IBAL, the agent is implemented on the host

	ca_attr_p->num_page_sizes = 1;
	ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap

	for (port_num = 0; port_num <= (end_port(ib_dev) - start_port(ib_dev)); ++port_num)
	{
		// Set up port pointers
		ibal_port_p = &ca_attr_p->p_port_attr[port_num];
		mthca_port_p = &hca_ports[port_num];

		// Port capabilities
		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
		from_port_cap(mthca_port_p->port_cap_flags, &ibal_port_p->cap);

		// Port attributes
		ibal_port_p->port_num   = (u8)(port_num + start_port(ib_dev));
		ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;
		ibal_port_p->lid        = cl_ntoh16(mthca_port_p->lid);
		ibal_port_p->lmc        = mthca_port_p->lmc;
		ibal_port_p->max_vls    = mthca_port_p->max_vl_num;
		ibal_port_p->sm_lid     = cl_ntoh16(mthca_port_p->sm_lid);
		ibal_port_p->sm_sl      = mthca_port_p->sm_sl;
		ibal_port_p->link_state = (mthca_port_p->state != 0) ? (uint8_t)mthca_port_p->state : IB_LINK_DOWN;
		ibal_port_p->num_gids   = (uint16_t)mthca_port_p->gid_tbl_len;
		ibal_port_p->num_pkeys  = mthca_port_p->pkey_tbl_len;
		ibal_port_p->pkey_ctr   = (uint16_t)mthca_port_p->bad_pkey_cntr;
		ibal_port_p->qkey_ctr   = (uint16_t)mthca_port_p->qkey_viol_cntr;
		ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz;
		ibal_port_p->mtu = (uint8_t)mthca_port_p->max_mtu;
		ibal_port_p->active_speed = mthca_port_p->active_speed;
		ibal_port_p->phys_state = mthca_port_p->phys_state;

		ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;
		// ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",
			ibal_port_p->port_num, cl_ntoh64(ibal_port_p->port_guid)));
	}
}

void cq_comp_handler(struct ib_cq *cq, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	struct ib_cq *p_ib_cq = (struct ib_cq *)cq;
	HCA_ENTER(HCA_DBG_CQ);
	if (p_hca && p_hca->comp_cb_p) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
		(p_hca->comp_cb_p)(p_ib_cq->x.ctx);
	}
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));
	}
	HCA_EXIT(HCA_DBG_CQ);
}

void ca_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	ib_event_rec_t event_rec;

	// validate the context before dereferencing it
	if (!p_hca || !p_hca->async_cb_p) {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
		return;
	}

	// prepare parameters
	event_rec.context = (void *)p_hca->ca_context;
	event_rec.trap.info.port_num = ev->element.port_num;
	event_rec.type = ev->event;
	if (event_rec.type > IB_AE_UNKNOWN) {
		// CL_ASSERT(0); // This shouldn't happen
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n",
			event_rec.type, IB_AE_LOCAL_FATAL));
		event_rec.type = IB_AE_LOCAL_FATAL;
	}

	// call the user callback; p_hca and the callback were validated above
	(p_hca->async_cb_p)(&event_rec);
}

void srq_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	ib_event_rec_t event_rec;
	struct ib_srq *p_srq;

	// prepare parameters
	event_rec.type = ev->event;
	// TODO: who fills x.vendor_specific
	event_rec.vendor_specific = ev->x.vendor_specific;
	p_srq = (struct ib_srq *)ev->element.srq;
	event_rec.context = p_srq->x.ctx;

	// call the user callback (check the callback pointer as well as the context)
	if (p_hca && p_hca->async_cb_p)
		(p_hca->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

void qp_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	ib_event_rec_t event_rec;
	struct ib_qp *p_ib_qp;

	// prepare parameters
	event_rec.type = ev->event;
	event_rec.vendor_specific = ev->x.vendor_specific;
	p_ib_qp = (struct ib_qp *)ev->element.qp;
	event_rec.context = p_ib_qp->x.ctx;

	// call the user callback (check the callback pointer as well as the context)
	if (p_hca && p_hca->async_cb_p)
		(p_hca->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

void cq_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hca_t *p_hca = (mlnx_hca_t *)context;
	ib_event_rec_t event_rec;
	struct ib_cq *p_ib_cq;

	// prepare parameters
	event_rec.type = ev->event;
	p_ib_cq = (struct ib_cq *)ev->element.cq;
	event_rec.context = p_ib_cq->x.ctx;

	// call the user callback (check the callback pointer as well as the context)
	if (p_hca && p_hca->async_cb_p)
		(p_hca->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}

enum ib_rate to_rate(uint8_t rate)
{
	if (rate == IB_PATH_RECORD_RATE_2_5_GBS) return IB_RATE_2_5_GBPS;
	if (rate == IB_PATH_RECORD_RATE_5_GBS) return IB_RATE_5_GBPS;
	if (rate == IB_PATH_RECORD_RATE_10_GBS) return IB_RATE_10_GBPS;
	if (rate == IB_PATH_RECORD_RATE_20_GBS) return IB_RATE_20_GBPS;
	if (rate == IB_PATH_RECORD_RATE_30_GBS) return IB_RATE_30_GBPS;
	if (rate == IB_PATH_RECORD_RATE_40_GBS) return IB_RATE_40_GBPS;
	if (rate == IB_PATH_RECORD_RATE_60_GBS) return IB_RATE_60_GBPS;
	if (rate == IB_PATH_RECORD_RATE_80_GBS) return IB_RATE_80_GBPS;
	if (rate == IB_PATH_RECORD_RATE_120_GBS) return IB_RATE_120_GBPS;
	return IB_RATE_PORT_CURRENT;
}

uint8_t from_rate(enum ib_rate ib_rate)
{
	if (ib_rate == IB_RATE_2_5_GBPS) return IB_PATH_RECORD_RATE_2_5_GBS;
	if (ib_rate == IB_RATE_5_GBPS) return IB_PATH_RECORD_RATE_5_GBS;
	if (ib_rate == IB_RATE_10_GBPS) return IB_PATH_RECORD_RATE_10_GBS;
	if (ib_rate == IB_RATE_20_GBPS) return IB_PATH_RECORD_RATE_20_GBS;
	if (ib_rate == IB_RATE_30_GBPS) return IB_PATH_RECORD_RATE_30_GBS;
	if (ib_rate == IB_RATE_40_GBPS) return IB_PATH_RECORD_RATE_40_GBS;
	if (ib_rate == IB_RATE_60_GBPS) return IB_PATH_RECORD_RATE_60_GBS;
	if (ib_rate == IB_RATE_80_GBPS) return IB_PATH_RECORD_RATE_80_GBS;
	if (ib_rate == IB_RATE_120_GBPS) return IB_PATH_RECORD_RATE_120_GBS;
	return 0;
}
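
/*
 * Illustrative round trip (not compiled): an IBAL path-record rate maps to
 * the ib_rate enum and back; unknown values fall back to
 * IB_RATE_PORT_CURRENT / 0 respectively.
 */
#if 0
	ASSERT( from_rate( to_rate( IB_PATH_RECORD_RATE_10_GBS ) ) == IB_PATH_RECORD_RATE_10_GBS );
	ASSERT( to_rate( 0xff ) == IB_RATE_PORT_CURRENT );
#endif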

int
to_av(
	IN	const	struct ib_device	*p_ib_dev,
	IN	const	ib_av_attr_t		*p_ib_av_attr,
	OUT		struct ib_ah_attr	*p_ib_ah_attr)
{
	int err = 0;
	u8 port_num;
	u16 gid_index;

	p_ib_ah_attr->port_num = p_ib_av_attr->port_num;
	p_ib_ah_attr->sl   = p_ib_av_attr->sl;
	p_ib_ah_attr->dlid = cl_ntoh16(p_ib_av_attr->dlid);
	p_ib_ah_attr->static_rate = to_rate(p_ib_av_attr->static_rate);
	p_ib_ah_attr->src_path_bits = p_ib_av_attr->path_bits;

	/* For a global destination or multicast address: */
	if (p_ib_av_attr->grh_valid) {
		p_ib_ah_attr->ah_flags |= IB_AH_GRH;
		p_ib_ah_attr->grh.hop_limit = p_ib_av_attr->grh.hop_limit;
		ib_grh_get_ver_class_flow( p_ib_av_attr->grh.ver_class_flow, NULL,
			&p_ib_ah_attr->grh.traffic_class, &p_ib_ah_attr->grh.flow_label );
		err = p_ib_dev->x.find_cached_gid((struct ib_device *)p_ib_dev,
			(union ib_gid *)p_ib_av_attr->grh.src_gid.raw, &port_num, &gid_index);
		if (err) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("ib_find_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));
			gid_index = 0;
		}
		else if (port_num != p_ib_ah_attr->port_num) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("ib_find_cached_gid returned wrong port_num %u (expected %u). Using the expected one.\n",
				(u32)port_num, (u32)p_ib_ah_attr->port_num));
		}
		p_ib_ah_attr->grh.sgid_index = (u8)gid_index;
		RtlCopyMemory(p_ib_ah_attr->grh.dgid.raw,
			p_ib_av_attr->grh.dest_gid.raw, sizeof(p_ib_ah_attr->grh.dgid));
	}
	else
		p_ib_ah_attr->ah_flags = 0;

	return err;
}

int from_av(
	IN	const	struct ib_device	*p_ib_dev,
	IN		struct ib_qp_attr	*p_ib_qp_attr,
	IN		struct ib_ah_attr	*p_ib_ah_attr,
	OUT		ib_av_attr_t		*p_ib_av_attr)
{
	int err = 0;

	p_ib_av_attr->port_num    = p_ib_ah_attr->port_num;
	p_ib_av_attr->sl          = p_ib_ah_attr->sl;
	p_ib_av_attr->dlid        = cl_hton16(p_ib_ah_attr->dlid);
	p_ib_av_attr->static_rate = from_rate(p_ib_ah_attr->static_rate);
	p_ib_av_attr->path_bits   = p_ib_ah_attr->src_path_bits;

	if (p_ib_qp_attr) {
		p_ib_av_attr->conn.path_mtu          = p_ib_qp_attr->path_mtu;
		p_ib_av_attr->conn.local_ack_timeout = p_ib_qp_attr->timeout;
		p_ib_av_attr->conn.seq_err_retry_cnt = p_ib_qp_attr->retry_cnt;
		p_ib_av_attr->conn.rnr_retry_cnt     = p_ib_qp_attr->rnr_retry;
	}

	if (p_ib_ah_attr->ah_flags & IB_AH_GRH) {
		p_ib_av_attr->grh_valid = TRUE;
		p_ib_av_attr->grh.hop_limit = p_ib_ah_attr->grh.hop_limit;
		p_ib_av_attr->grh.ver_class_flow = ib_grh_set_ver_class_flow(
			0, p_ib_ah_attr->grh.traffic_class, p_ib_ah_attr->grh.flow_label );
		RtlCopyMemory(p_ib_av_attr->grh.dest_gid.raw,
			p_ib_ah_attr->grh.dgid.raw, sizeof(p_ib_av_attr->grh.dest_gid));
		err = p_ib_dev->x.get_cached_gid((struct ib_device *)p_ib_dev,
			p_ib_ah_attr->port_num, p_ib_ah_attr->grh.sgid_index,
			(union ib_gid*)p_ib_av_attr->grh.src_gid.raw );
		if (err) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("ib_get_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));
		}
	}
	else
		p_ib_av_attr->grh_valid = FALSE;

	return err;
}

enum ib_access_flags
to_qp_acl(
	IN	ib_access_t	ibal_acl)
{
#define IBAL_ACL(ifl,mfl) if (ibal_acl & ifl) acc |= mfl
	enum ib_access_flags acc = 0;

	IBAL_ACL(IB_AC_RDMA_READ,IB_ACCESS_REMOTE_READ);
	IBAL_ACL(IB_AC_RDMA_WRITE,IB_ACCESS_REMOTE_WRITE);
	IBAL_ACL(IB_AC_ATOMIC,IB_ACCESS_REMOTE_ATOMIC);
	IBAL_ACL(IB_AC_LOCAL_WRITE,IB_ACCESS_LOCAL_WRITE);
	IBAL_ACL(IB_AC_MW_BIND,IB_ACCESS_MW_BIND);

	return acc;
}

ib_access_t
from_qp_acl(
	IN	enum ib_access_flags	acc)
{
#define IB_ACL(ifl,mfl) if (acc & ifl) ibal_acl |= mfl
	ib_access_t ibal_acl = 0;

	IB_ACL(IB_ACCESS_REMOTE_READ,IB_AC_RDMA_READ);
	IB_ACL(IB_ACCESS_REMOTE_WRITE,IB_AC_RDMA_WRITE);
	IB_ACL(IB_ACCESS_REMOTE_ATOMIC,IB_AC_ATOMIC);
	IB_ACL(IB_ACCESS_LOCAL_WRITE,IB_AC_LOCAL_WRITE);
	IB_ACL(IB_ACCESS_MW_BIND,IB_AC_MW_BIND);

	return ibal_acl;
}
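
/*
 * Illustrative round trip (not compiled): IBAL access rights map 1:1 onto
 * ib_access_flags bits, so converting there and back is lossless for the
 * five bits handled above.
 */
#if 0
	ib_access_t acl = IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE;
	ASSERT( from_qp_acl( to_qp_acl( acl ) ) == acl );
#endif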

static enum ib_qp_state to_qp_state(ib_qp_state_t ib_qps)
{
#define MAP_XIB_QPS(val1,val2) case val1: qps = val2; break
	enum ib_qp_state qps;
	switch (ib_qps) {
		MAP_XIB_QPS( IB_QPS_RESET, XIB_QPS_RESET );
		MAP_XIB_QPS( IB_QPS_INIT, XIB_QPS_INIT );
		MAP_XIB_QPS( IB_QPS_RTR, XIB_QPS_RTR );
		MAP_XIB_QPS( IB_QPS_RTS, XIB_QPS_RTS );
		MAP_XIB_QPS( IB_QPS_SQD, XIB_QPS_SQD );
		MAP_XIB_QPS( IB_QPS_SQD_DRAINING, XIB_QPS_SQD );
		MAP_XIB_QPS( IB_QPS_SQD_DRAINED, XIB_QPS_SQD );
		MAP_XIB_QPS( IB_QPS_SQERR, XIB_QPS_SQE );
		MAP_XIB_QPS( IB_QPS_ERROR, XIB_QPS_ERR );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped IBAL qp_state %d\n", ib_qps));
			qps = 0xffffffff;
	}
	return qps;
}

static ib_qp_state_t from_qp_state(enum ib_qp_state qps, int draining)
{
#define MAP_IB_QPS(val1,val2) case val1: ib_qps = val2; break
	ib_qp_state_t ib_qps;

	if (qps == XIB_QPS_SQD) {
		ib_qps = draining ? IB_QPS_SQD_DRAINING : IB_QPS_SQD;
		return ib_qps;
	}

	switch (qps) {
		MAP_IB_QPS( XIB_QPS_RESET, IB_QPS_RESET );
		MAP_IB_QPS( XIB_QPS_INIT, IB_QPS_INIT );
		MAP_IB_QPS( XIB_QPS_RTR, IB_QPS_RTR );
		MAP_IB_QPS( XIB_QPS_RTS, IB_QPS_RTS );
		MAP_IB_QPS( XIB_QPS_SQE, IB_QPS_SQERR );
		MAP_IB_QPS( XIB_QPS_ERR, IB_QPS_ERROR );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped ib_qp_state %d\n", qps));
			ib_qps = 0xffffffff;
	}
	return ib_qps;
}
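
/*
 * Illustrative mapping note (not compiled): the three IBAL SQD variants all
 * collapse into XIB_QPS_SQD, and from_qp_state() uses the 'draining' flag to
 * recover IB_QPS_SQD vs. IB_QPS_SQD_DRAINING on the way back.
 */
#if 0
	ASSERT( to_qp_state( IB_QPS_SQD_DRAINING ) == XIB_QPS_SQD );
	ASSERT( from_qp_state( XIB_QPS_SQD, 1 ) == IB_QPS_SQD_DRAINING );
	ASSERT( from_qp_state( XIB_QPS_SQD, 0 ) == IB_QPS_SQD );
#endif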

ib_api_status_t
to_qp_attr(
	IN	const	struct ib_qp		*p_ib_qp,
	IN		ib_qp_type_t		qp_type,
	IN	const	ib_qp_mod_t		*p_ib_qp_mod,
	OUT		struct ib_qp_attr	*p_ib_qp_attr,
	OUT		int			*p_qp_attr_mask
	)
{
	int err;
	ib_api_status_t	status = IB_SUCCESS;
	struct mlx4_ib_qp *p_mib_qp = (struct mlx4_ib_qp *)p_ib_qp;

	RtlZeroMemory( p_ib_qp_attr, sizeof *p_ib_qp_attr );
	*p_qp_attr_mask = IB_QP_STATE;
	p_ib_qp_attr->qp_state = to_qp_state( p_ib_qp_mod->req_state );

	// skipped cases
	if (p_mib_qp->state == XIB_QPS_RESET && p_ib_qp_mod->req_state != IB_QPS_INIT)
		return IB_NOT_DONE;

	switch (p_ib_qp_mod->req_state) {
	case IB_QPS_RESET:
	case IB_QPS_ERROR:
	case IB_QPS_SQERR:
	case IB_QPS_TIME_WAIT:
		break;

	case IB_QPS_INIT:

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
			case IB_QPT_UNRELIABLE_CONN:
				*p_qp_attr_mask |= IB_QP_PORT | IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS;
				p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.init.access_ctrl);
				break;
			case IB_QPT_QP0:
			case IB_QPT_QP1:
				// TODO: these cases had IB_QP_PORT in mthca
				// TODO: they do not pass the ib_modify_qp_is_ok check here
				*p_qp_attr_mask |= IB_QP_QKEY | IB_QP_PKEY_INDEX;
				p_ib_qp_attr->qkey = cl_ntoh32 (p_ib_qp_mod->state.init.qkey);
				break;
			case IB_QPT_UNRELIABLE_DGRM:
			default:
				*p_qp_attr_mask |= IB_QP_PORT | IB_QP_QKEY | IB_QP_PKEY_INDEX;
				p_ib_qp_attr->qkey = cl_ntoh32 (p_ib_qp_mod->state.init.qkey);
				break;
		}

		// IB_QP_PORT
		p_ib_qp_attr->port_num = p_ib_qp_mod->state.init.primary_port;

		// IB_QP_PKEY_INDEX
		p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.init.pkey_index;

		break;

	case IB_QPS_RTR:
		/* modifying the WQE depth is not supported */
		if( p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||
			p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_RQ_DEPTH ) {
			status = IB_UNSUPPORTED;
			break;
		}

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
				*p_qp_attr_mask |= /* required flags */
					IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |
					IB_QP_AV | IB_QP_PATH_MTU | IB_QP_MIN_RNR_TIMER;

				// IB_QP_DEST_QPN
				p_ib_qp_attr->dest_qp_num = cl_ntoh32 (p_ib_qp_mod->state.rtr.dest_qp);

				// IB_QP_RQ_PSN
				p_ib_qp_attr->rq_psn = cl_ntoh32 (p_ib_qp_mod->state.rtr.rq_psn);

				// IB_QP_MAX_DEST_RD_ATOMIC
				p_ib_qp_attr->max_dest_rd_atomic = p_ib_qp_mod->state.rtr.resp_res;

				// IB_QP_AV, IB_QP_PATH_MTU: convert the primary RC AV (mandatory)
				err = to_av(p_ib_qp->device,
					&p_ib_qp_mod->state.rtr.primary_av, &p_ib_qp_attr->ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}
				p_ib_qp_attr->path_mtu  = p_ib_qp_mod->state.rtr.primary_av.conn.path_mtu;
				p_ib_qp_attr->timeout   = p_ib_qp_mod->state.rtr.primary_av.conn.local_ack_timeout;
				p_ib_qp_attr->retry_cnt = p_ib_qp_mod->state.rtr.primary_av.conn.seq_err_retry_cnt;
				p_ib_qp_attr->rnr_retry = p_ib_qp_mod->state.rtr.primary_av.conn.rnr_retry_cnt;

				// IB_QP_MIN_RNR_TIMER, required in RTR, optional in RTS.
				p_ib_qp_attr->min_rnr_timer = p_ib_qp_mod->state.rtr.rnr_nak_timeout;

				// IB_QP_ACCESS_FLAGS: convert the remote-access flags
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
					*p_qp_attr_mask |= IB_QP_ACCESS_FLAGS;		/* optional flag */
					p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.rtr.access_ctrl);
				}

				// IB_QP_ALT_PATH: convert the alternate RC AV
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
					*p_qp_attr_mask |= IB_QP_ALT_PATH;	/* required flag */
					err = to_av(p_ib_qp->device,
						&p_ib_qp_mod->state.rtr.alternate_av, &p_ib_qp_attr->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
					p_ib_qp_attr->alt_timeout  = p_ib_qp_mod->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
					p_ib_qp_attr->alt_port_num = p_ib_qp_mod->state.rtr.alternate_av.port_num;
				}

				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rtr.pkey_index;
				}
				break;

			case IB_QPT_UNRELIABLE_CONN:
				*p_qp_attr_mask |= /* required flags */
					IB_QP_DEST_QPN | IB_QP_RQ_PSN | IB_QP_AV | IB_QP_PATH_MTU;

				// IB_QP_DEST_QPN
				p_ib_qp_attr->dest_qp_num = cl_ntoh32 (p_ib_qp_mod->state.rtr.dest_qp);

				// IB_QP_RQ_PSN
				p_ib_qp_attr->rq_psn = cl_ntoh32 (p_ib_qp_mod->state.rtr.rq_psn);

				// IB_QP_PATH_MTU
				p_ib_qp_attr->path_mtu = p_ib_qp_mod->state.rtr.primary_av.conn.path_mtu;

				// IB_QP_AV: convert the primary AV (mandatory)
				err = to_av(p_ib_qp->device,
					&p_ib_qp_mod->state.rtr.primary_av, &p_ib_qp_attr->ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}

				// IB_QP_ACCESS_FLAGS: convert the remote-access flags
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
					*p_qp_attr_mask |= IB_QP_ACCESS_FLAGS;		/* optional flag */
					p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.rtr.access_ctrl);
				}

				// IB_QP_ALT_PATH: convert the alternate UC AV
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
					*p_qp_attr_mask |= IB_QP_ALT_PATH;	/* required flag */
					err = to_av(p_ib_qp->device,
						&p_ib_qp_mod->state.rtr.alternate_av, &p_ib_qp_attr->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
					p_ib_qp_attr->alt_timeout  = p_ib_qp_mod->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
					p_ib_qp_attr->alt_port_num = p_ib_qp_mod->state.rtr.alternate_av.port_num;
				}

				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rtr.pkey_index;
				}
				break;

			case IB_QPT_UNRELIABLE_DGRM:
			case IB_QPT_QP0:
			case IB_QPT_QP1:
			default:
				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rtr.pkey_index;
				}

				// IB_QP_QKEY
				if (p_ib_qp_mod->state.rtr.opts & IB_MOD_QP_QKEY) {
					*p_qp_attr_mask |= IB_QP_QKEY;
					p_ib_qp_attr->qkey = cl_ntoh32 (p_ib_qp_mod->state.rtr.qkey);
				}
				break;

		}
		break;

	case IB_QPS_RTS:
		/* modifying the WQE depth is not supported */
		if( p_ib_qp_mod->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
			p_ib_qp_mod->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
		{
			status = IB_UNSUPPORTED;
			break;
		}

		switch (qp_type) {
			case IB_QPT_RELIABLE_CONN:
				if (p_mib_qp->state != XIB_QPS_RTS)
					*p_qp_attr_mask |= /* required flags */
						IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC | IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT | IB_QP_RNR_RETRY;

				// IB_QP_MAX_QP_RD_ATOMIC
				p_ib_qp_attr->max_rd_atomic = p_ib_qp_mod->state.rts.init_depth;

				// IB_QP_TIMEOUT
				p_ib_qp_attr->timeout = p_ib_qp_mod->state.rts.local_ack_timeout; // XXX: conv

				// IB_QP_RETRY_CNT
				p_ib_qp_attr->retry_cnt = p_ib_qp_mod->state.rts.retry_cnt;

				// IB_QP_RNR_RETRY
				p_ib_qp_attr->rnr_retry = p_ib_qp_mod->state.rts.rnr_retry_cnt;

				// IB_QP_MAX_DEST_RD_ATOMIC: update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_RESP_RES) {
					*p_qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC;
					p_ib_qp_attr->max_dest_rd_atomic = p_ib_qp_mod->state.rts.resp_res;
				}

#ifdef WIN_TO_BE_REMOVED
		//TODO: do we need this?
		// Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rts.pkey_index;
				}
#endif

				// IB_QP_MIN_RNR_TIMER
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {
					*p_qp_attr_mask |= IB_QP_MIN_RNR_TIMER;
					p_ib_qp_attr->min_rnr_timer = p_ib_qp_mod->state.rts.rnr_nak_timeout;
				}

				// IB_QP_PATH_MIG_STATE
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_APM_STATE) {
					*p_qp_attr_mask |= IB_QP_PATH_MIG_STATE;
					p_ib_qp_attr->path_mig_state = p_ib_qp_mod->state.rts.apm_state;
				}

				// IB_QP_ACCESS_FLAGS
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
					*p_qp_attr_mask |= IB_QP_ACCESS_FLAGS;		/* optional flags */
					p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.rts.access_ctrl);
				}

				// IB_QP_ALT_PATH: convert the alternate RC AV
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
					*p_qp_attr_mask |= IB_QP_ALT_PATH;	/* optional flag */
					err = to_av(p_ib_qp->device,
						&p_ib_qp_mod->state.rts.alternate_av, &p_ib_qp_attr->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
					p_ib_qp_attr->alt_timeout = p_ib_qp_mod->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv
				}
				break;

			case IB_QPT_UNRELIABLE_CONN:
				if (p_mib_qp->state != XIB_QPS_RTS)
					*p_qp_attr_mask |= /* required flags */
						IB_QP_SQ_PSN;

				// IB_QP_MAX_DEST_RD_ATOMIC: update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_RESP_RES) {
					*p_qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC;
					p_ib_qp_attr->max_dest_rd_atomic = p_ib_qp_mod->state.rts.resp_res;
				}

#ifdef WIN_TO_BE_REMOVED
		//TODO: do we need this?
		// Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

				// IB_QP_PKEY_INDEX
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_PKEY) {
					*p_qp_attr_mask |= IB_QP_PKEY_INDEX;
					p_ib_qp_attr->pkey_index = p_ib_qp_mod->state.rts.pkey_index;
				}
#endif

				// IB_QP_PATH_MIG_STATE
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_APM_STATE) {
					*p_qp_attr_mask |= IB_QP_PATH_MIG_STATE;
					p_ib_qp_attr->path_mig_state = p_ib_qp_mod->state.rts.apm_state;
				}

				// IB_QP_ACCESS_FLAGS
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
					*p_qp_attr_mask |= IB_QP_ACCESS_FLAGS;		/* optional flags */
					p_ib_qp_attr->qp_access_flags = to_qp_acl(p_ib_qp_mod->state.rts.access_ctrl);
				}

				// IB_QP_ALT_PATH: convert the alternate UC AV
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
					*p_qp_attr_mask |= IB_QP_ALT_PATH;	/* optional flag */
					err = to_av(p_ib_qp->device,
						&p_ib_qp_mod->state.rts.alternate_av, &p_ib_qp_attr->alt_ah_attr);
					if (err) {
						status = IB_ERROR;
						break;
					}
				}
				break;

			case IB_QPT_UNRELIABLE_DGRM:
			case IB_QPT_QP0:
			case IB_QPT_QP1:
			default:
				if (p_mib_qp->state != XIB_QPS_RTS)
					*p_qp_attr_mask |= /* required flags */
						IB_QP_SQ_PSN;

				// IB_QP_QKEY
				if (p_ib_qp_mod->state.rts.opts & IB_MOD_QP_QKEY) {
					*p_qp_attr_mask |= IB_QP_QKEY;
					p_ib_qp_attr->qkey = cl_ntoh32 (p_ib_qp_mod->state.rts.qkey);
				}
				break;

		}

		// IB_QP_SQ_PSN: common for all
		p_ib_qp_attr->sq_psn = cl_ntoh32 (p_ib_qp_mod->state.rts.sq_psn);
		// NB: the IB_QP_CUR_STATE flag is not provisioned by IBAL
		break;

	case IB_QPS_SQD:
	case IB_QPS_SQD_DRAINING:
	case IB_QPS_SQD_DRAINED:
		*p_qp_attr_mask |= IB_QP_EN_SQD_ASYNC_NOTIFY;
		p_ib_qp_attr->en_sqd_async_notify = (u8)p_ib_qp_mod->state.sqd.sqd_event;
		HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM ,("IB_QP_EN_SQD_ASYNC_NOTIFY appears to be unsupported\n"));
		break;

	default:
		// NB: is this an error case that needs this message? Should we return an error instead?
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped qp_state %d\n", p_ib_qp_mod->req_state));
		break;

	}

	return status;
}
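
/*
 * Illustrative flow (not compiled): the shim builds an ib_qp_attr/mask pair
 * from an IBAL modify request and hands it to the verbs layer.  The
 * ib_modify_qp() call below is assumed to be the bundled verbs entry point;
 * p_qp_mod and qp_type are hypothetical caller-supplied values.
 */
#if 0
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	ib_api_status_t status;

	status = to_qp_attr( p_ib_qp, qp_type, p_qp_mod, &qp_attr, &qp_attr_mask );
	if( status == IB_SUCCESS )
		err = ib_modify_qp( p_ib_qp, &qp_attr, qp_attr_mask );
#endif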

enum ib_qp_type to_qp_type(ib_qp_type_t qp_type)
{
#define MAP_TYPE(val1,val2) case val1: ib_qp_type = val2; break
	enum ib_qp_type ib_qp_type;

	switch (qp_type) {
		MAP_TYPE( IB_QPT_RELIABLE_CONN, IB_QPT_RC );
		MAP_TYPE( IB_QPT_UNRELIABLE_CONN, IB_QPT_UC );
		MAP_TYPE( IB_QPT_UNRELIABLE_DGRM, IB_QPT_UD );
		MAP_TYPE( IB_QPT_QP0, IB_QPT_SMI );
		MAP_TYPE( IB_QPT_QP1, IB_QPT_GSI );
		MAP_TYPE( IB_QPT_RAW_IPV6, IB_QPT_RAW_IP_V6 );
		MAP_TYPE( IB_QPT_RAW_ETHER, IB_QPT_RAW_ETY );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("Unmapped IBAL qp_type %d\n", qp_type));
			ib_qp_type = 0xffffffff;
	}
	return ib_qp_type;
}

ib_qp_type_t from_qp_type(enum ib_qp_type ib_qp_type)
{
#define MAP_IB_TYPE(val1,val2) case val1: qp_type = val2; break
	ib_qp_type_t qp_type;

	switch (ib_qp_type) {
		MAP_IB_TYPE( IB_QPT_RC, IB_QPT_RELIABLE_CONN );
		MAP_IB_TYPE( IB_QPT_UC, IB_QPT_UNRELIABLE_CONN );
		MAP_IB_TYPE( IB_QPT_UD, IB_QPT_UNRELIABLE_DGRM );
		MAP_IB_TYPE( IB_QPT_SMI, IB_QPT_QP0 );
		MAP_IB_TYPE( IB_QPT_GSI, IB_QPT_QP1 );
		MAP_IB_TYPE( IB_QPT_RAW_IP_V6, IB_QPT_RAW_IPV6 );
		MAP_IB_TYPE( IB_QPT_RAW_ETY, IB_QPT_RAW_ETHER );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,
				("Unmapped MLX4 ib_qp_type %d\n", ib_qp_type));
			qp_type = 0xffffffff;
	}
	return qp_type;
}

ib_apm_state_t from_apm_state(enum ib_mig_state apm)
{
	if (apm == IB_MIG_MIGRATED) return IB_APM_MIGRATED;
	if (apm == IB_MIG_REARM) return IB_APM_REARM;
	if (apm == IB_MIG_ARMED) return IB_APM_ARMED;
	return 0xffffffff;
}

ib_api_status_t
from_qp_attr(
	IN	const	struct ib_qp		*p_ib_qp,
	IN		struct ib_qp_attr	*p_ib_qp_attr,
	OUT		ib_qp_attr_t		*p_qp_attr
	)
{
	int err;
	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
	p_qp_attr->h_pd = (ib_pd_handle_t)p_ib_qp->pd;
	p_qp_attr->qp_type = from_qp_type(p_ib_qp->qp_type);
	p_qp_attr->access_ctrl = from_qp_acl(p_ib_qp_attr->qp_access_flags);
	p_qp_attr->pkey_index = p_ib_qp_attr->pkey_index;

	p_qp_attr->sq_max_inline = p_ib_qp_attr->cap.max_inline_data;
	p_qp_attr->sq_depth = p_ib_qp_attr->cap.max_send_wr;
	p_qp_attr->rq_depth = p_ib_qp_attr->cap.max_recv_wr;
	p_qp_attr->sq_sge = p_ib_qp_attr->cap.max_send_sge;
	p_qp_attr->rq_sge = p_ib_qp_attr->cap.max_recv_sge;
	p_qp_attr->init_depth = p_ib_qp_attr->max_rd_atomic;
	p_qp_attr->resp_res = p_ib_qp_attr->max_dest_rd_atomic;

	p_qp_attr->h_sq_cq = (ib_cq_handle_t)p_ib_qp->send_cq;
	p_qp_attr->h_rq_cq = (ib_cq_handle_t)p_ib_qp->recv_cq;
	p_qp_attr->h_srq = (ib_srq_handle_t)p_ib_qp->srq;

	p_qp_attr->sq_signaled = !!((struct mlx4_ib_qp *)p_ib_qp)->sq_signal_bits;

	p_qp_attr->state = from_qp_state( p_ib_qp_attr->qp_state,
		p_ib_qp_attr->sq_draining);
	p_qp_attr->num = cl_hton32(p_ib_qp->qp_num);
	p_qp_attr->dest_num = cl_hton32(p_ib_qp_attr->dest_qp_num);
	p_qp_attr->qkey = cl_hton32(p_ib_qp_attr->qkey);

	p_qp_attr->sq_psn = cl_hton32(p_ib_qp_attr->sq_psn);
	p_qp_attr->rq_psn = cl_hton32(p_ib_qp_attr->rq_psn);

	p_qp_attr->primary_port = p_ib_qp_attr->port_num;
	p_qp_attr->alternate_port = p_ib_qp_attr->alt_port_num;
	err = from_av( p_ib_qp->device, p_ib_qp_attr, &p_ib_qp_attr->ah_attr, &p_qp_attr->primary_av);
	if (err)
		goto err_av;
	err = from_av( p_ib_qp->device, p_ib_qp_attr, &p_ib_qp_attr->alt_ah_attr, &p_qp_attr->alternate_av);
	if (err)
		goto err_av;
	p_qp_attr->apm_state = from_apm_state(p_ib_qp_attr->path_mig_state);

	return IB_SUCCESS;

err_av:
	return errno_to_iberr(err);
}