/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: hca_data.c 148 2005-07-12 07:48:46Z sleybo $
 */
#include "hca_driver.h"
#include "hca_utils.h"

#if defined(EVENT_TRACING)
#include "hca_data.tmh"
#endif

#include "mthca_dev.h"
#include <ib_cache.h>
static cl_spinlock_t	hob_lock;

uint32_t	g_mlnx_dpc2thread = 0;

cl_qlist_t	mlnx_hca_list;

mlnx_hob_t	mlnx_hob_array[MLNX_NUM_HOBKL];		// kernel HOB - one per HCA (cmdif access)
mlnx_hobul_t	*mlnx_hobul_array[MLNX_NUM_HOBUL];	// kernel HOBUL - one per HCA (kar access)
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_hca_insert(
	IN				mlnx_hca_t				*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}

void
mlnx_hca_remove(
	IN				mlnx_hca_t				*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}

mlnx_hca_t*
mlnx_hca_from_guid(
	IN				ib_net64_t				guid )
{
	cl_list_item_t	*p_item;
	mlnx_hca_t		*p_hca = NULL;

	cl_spinlock_acquire( &hob_lock );
	p_item = cl_qlist_head( &mlnx_hca_list );
	while( p_item != cl_qlist_end( &mlnx_hca_list ) )
	{
		p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
		if( p_hca->guid == guid )
			break;
		p_item = cl_qlist_next( p_item );
		p_hca = NULL;
	}
	cl_spinlock_release( &hob_lock );
	return p_hca;
}
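/*
 * Usage sketch (illustrative only, not taken from the original source): a PnP
 * "add device" path would typically fill a mlnx_hca_t, set its guid and call
 * mlnx_hca_insert(); later lookups and removal then go through the same list:
 *
 *     mlnx_hca_insert( p_new_hca );
 *     ...
 *     p_hca = mlnx_hca_from_guid( node_guid );
 *     if( p_hca )
 *         mlnx_hca_remove( p_hca );
 *
 * All three helpers serialize on the hob_lock spinlock.
 */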
void
mlnx_names_from_guid(
	IN				ib_net64_t				guid,
		OUT			char					**hca_name_p,
		OUT			char					**dev_name_p)
{
	uint32_t idx;

	if (!hca_name_p) return;
	if (!dev_name_p) return;

	for (idx = 0; idx < mlnx_num_hca; idx++)
	{
		if (mlnx_hca_array[idx].ifx.guid == guid)
		{
			*hca_name_p = mlnx_hca_array[idx].hca_name_p;
			*dev_name_p = mlnx_hca_array[idx].dev_name_p;
		}
	}
}
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
cl_status_t
mlnx_hcas_init( void )
{
	cl_qlist_init( &mlnx_hca_list );
	return cl_spinlock_init( &hob_lock );
}
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_hobs_set_cb(
	IN				mlnx_hob_t				*hob_p,
	IN				ci_completion_cb_t			comp_cb_p,
	IN				ci_async_event_cb_t			async_cb_p,
	IN		const	void* const				ib_context)
{
	cl_status_t		cl_status;

	// Setup the callbacks
	if (!hob_p->async_proc_mgr_p)
	{
		hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
		if( !hob_p->async_proc_mgr_p )
		{
			return IB_INSUFFICIENT_MEMORY;
		}
		cl_async_proc_construct( hob_p->async_proc_mgr_p );
		cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
		if( cl_status != CL_SUCCESS )
		{
			cl_async_proc_destroy( hob_p->async_proc_mgr_p );
			cl_free(hob_p->async_proc_mgr_p);
			hob_p->async_proc_mgr_p = NULL;
			return IB_INSUFFICIENT_RESOURCES;
		}
	}

	hob_p->comp_cb_p	= comp_cb_p;
	hob_p->async_cb_p	= async_cb_p;
	hob_p->ca_context	= ib_context; // This is the context our CB forwards to IBAL
	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hca_idx %d context 0x%p\n", (int)(hob_p - mlnx_hob_array), ib_context));
	return IB_SUCCESS;
}
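/*
 * Note (illustrative assumption, not from the original source): IBAL's open-CA
 * path is expected to call mlnx_hobs_set_cb() once per HCA to register its
 * completion and asynchronous-event callbacks; the ib_context stored here is
 * what ca_event_handler() below hands back to IBAL in each event record.
 */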
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_hobs_remove(
	IN				mlnx_hob_t				*hob_p)
{
	cl_async_proc_t	*p_async_proc;
	mlnx_cache_t	*p_cache;

	cl_spinlock_acquire( &hob_lock );

	hob_p->mark = E_MARK_INVALID;

	p_async_proc = hob_p->async_proc_mgr_p;
	hob_p->async_proc_mgr_p = NULL;

	p_cache = hob_p->cache;
	hob_p->cache = NULL;

	hob_p->comp_cb_p = NULL;
	hob_p->async_cb_p = NULL;
	hob_p->ca_context = NULL;
	hob_p->cl_device_h = NULL;

	cl_spinlock_release( &hob_lock );

	if( p_async_proc )
	{
		cl_async_proc_destroy( p_async_proc );
		cl_free( p_async_proc );
	}

	if( p_cache )
		cl_free( p_cache );

	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hobs_remove idx %d \n", (int)(hob_p - mlnx_hob_array)));
}
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static void
mthca_port_cap_to_ibal(
	IN				u32					mthca_port_cap,
		OUT			ib_port_cap_t				*ibal_port_cap_p)
{
	if (mthca_port_cap & IB_PORT_CM_SUP)
		ibal_port_cap_p->cm = TRUE;
	if (mthca_port_cap & IB_PORT_SNMP_TUNNEL_SUP)
		ibal_port_cap_p->snmp = TRUE;
	if (mthca_port_cap & IB_PORT_DEVICE_MGMT_SUP)
		ibal_port_cap_p->dev_mgmt = TRUE;
	if (mthca_port_cap & IB_PORT_VENDOR_CLASS_SUP)
		ibal_port_cap_p->vend = TRUE;
	if (mthca_port_cap & IB_PORT_SM_DISABLED)
		ibal_port_cap_p->sm_disable = TRUE;
	if (mthca_port_cap & IB_PORT_SM)
		ibal_port_cap_p->sm = TRUE;
}
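/*
 * Example (illustrative): a port advertising IB_PORT_SM | IB_PORT_CM_SUP in its
 * mthca port_cap_flags leaves this helper with ibal cap.sm and cap.cm set to
 * TRUE and every other capability bit cleared, since the caller zeroes the
 * ib_port_cap_t before the conversion.
 */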
/////////////////////////////////////////////////////////
void
mlnx_conv_hca_cap(
	IN				struct ib_device		*ib_dev,
	IN				struct ib_device_attr	*hca_info_p,
	IN				struct ib_port_attr		*hca_ports,
	OUT			ib_ca_attr_t				*ca_attr_p)
{
	uint8_t				port_num;
	ib_port_attr_t		*ibal_port_p;
	struct ib_port_attr	*mthca_port_p;

	ca_attr_p->vend_id  = hca_info_p->vendor_id;
	ca_attr_p->dev_id   = (uint16_t)hca_info_p->vendor_part_id;
	ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;
	ca_attr_p->fw_ver   = hca_info_p->fw_ver;
	ca_attr_p->ca_guid  = *(UNALIGNED64 uint64_t *)&ib_dev->node_guid;
	ca_attr_p->num_ports = ib_dev->phys_port_cnt;
	ca_attr_p->max_qps   = hca_info_p->max_qp;
	ca_attr_p->max_wrs   = hca_info_p->max_qp_wr;
	ca_attr_p->max_sges  = hca_info_p->max_sge;
	ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;
	ca_attr_p->max_cqs   = hca_info_p->max_cq;
	ca_attr_p->max_cqes  = hca_info_p->max_cqe;
	ca_attr_p->max_pds   = hca_info_p->max_pd;
	ca_attr_p->init_regions = hca_info_p->max_mr;
	ca_attr_p->init_windows = hca_info_p->max_mw;
	ca_attr_p->init_region_size = hca_info_p->max_mr_size;
	ca_attr_p->max_addr_handles = hca_info_p->max_ah;
	ca_attr_p->atomicity      = hca_info_p->atomic_cap;
	ca_attr_p->max_partitions = hca_info_p->max_pkeys;
	ca_attr_p->max_qp_resp_res   = (uint8_t)hca_info_p->max_qp_rd_atom;
	ca_attr_p->max_resp_res      = (uint8_t)hca_info_p->max_res_rd_atom;
	ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;
	ca_attr_p->max_ipv6_qps   = hca_info_p->max_raw_ipv6_qp;
	ca_attr_p->max_ether_qps  = hca_info_p->max_raw_ethy_qp;
	ca_attr_p->max_mcast_grps = hca_info_p->max_mcast_grp;
	ca_attr_p->max_mcast_qps  = hca_info_p->max_total_mcast_qp_attach;
	ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;
	ca_attr_p->max_fmr        = hca_info_p->max_fmr;
	ca_attr_p->max_map_per_fmr = hca_info_p->max_map_per_fmr;

	ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;
	ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;
	ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;
	ca_attr_p->raw_mcast_support    = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;
	ca_attr_p->apm_support          = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;
	ca_attr_p->av_port_check        = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;
	ca_attr_p->change_primary_port  = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;
	ca_attr_p->modify_wr_depth      = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;
	ca_attr_p->hw_agents            = FALSE; // in the context of IBAL the agent is implemented on the host

	ca_attr_p->num_page_sizes = 1;
	ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap

	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num)
	{
		// Setup port pointers
		ibal_port_p = &ca_attr_p->p_port_attr[port_num];
		mthca_port_p = &hca_ports[port_num];

		// Port capabilities
		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
		mthca_port_cap_to_ibal(mthca_port_p->port_cap_flags, &ibal_port_p->cap);

		// Port attributes
		ibal_port_p->port_num   = port_num + start_port(ib_dev);
		ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;
		ibal_port_p->lid        = cl_ntoh16(mthca_port_p->lid);
		ibal_port_p->lmc        = mthca_port_p->lmc;
		ibal_port_p->max_vls    = mthca_port_p->max_vl_num;
		ibal_port_p->sm_lid     = cl_ntoh16(mthca_port_p->sm_lid);
		ibal_port_p->sm_sl      = mthca_port_p->sm_sl;
		ibal_port_p->link_state = (mthca_port_p->state != 0) ? (uint8_t)mthca_port_p->state : IB_LINK_DOWN;
		ibal_port_p->num_gids   = (uint16_t)mthca_port_p->gid_tbl_len;
		ibal_port_p->num_pkeys  = mthca_port_p->pkey_tbl_len;
		ibal_port_p->pkey_ctr   = (uint16_t)mthca_port_p->bad_pkey_cntr;
		ibal_port_p->qkey_ctr   = (uint16_t)mthca_port_p->qkey_viol_cntr;
		ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz;
		ibal_port_p->mtu        = (uint8_t)mthca_port_p->max_mtu;

		ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;
		// ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",
			ibal_port_p->port_num, cl_ntoh64(ibal_port_p->port_guid)));
	}
}
void cq_comp_handler(struct ib_cq *cq, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	struct mthca_cq *mcq = (struct mthca_cq *)cq;
	HCA_ENTER(HCA_DBG_CQ);
	if (hob_p) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
		(hob_p->comp_cb_p)(mcq->cq_context);
	}
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));
	}
	HCA_EXIT(HCA_DBG_CQ);
}
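/*
 * Note (illustrative assumption): cq_comp_handler and the three event handlers
 * below are the routines this shim would register with the low-level mthca
 * verbs as the completion/event callbacks for CQs, QPs and the CA itself; each
 * one only translates the low-level event into the IBAL callback that was
 * registered through mlnx_hobs_set_cb().
 */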
void ca_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	ib_event_rec_t event_rec;

	// prepare parameters
	event_rec.context = (void *)hob_p->ca_context;
	event_rec.trap.info.port_num = ev->element.port_num;
	event_rec.type = ev->event;
	if (event_rec.type > IB_AE_UNKNOWN) {
		// CL_ASSERT(0); // This shouldn't happen
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n",
			event_rec.type, IB_AE_LOCAL_FATAL));
		event_rec.type = IB_AE_LOCAL_FATAL;
	}

	// call the user callback
	if (hob_p)
		(hob_p->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}
void qp_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	ib_event_rec_t event_rec;
	struct mthca_qp *qp_p;

	// prepare parameters
	event_rec.type = ev->event;
	qp_p = (struct mthca_qp *)ev->element.qp;
	event_rec.context = qp_p->qp_context;

	// call the user callback
	if (hob_p)
		(hob_p->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}
void cq_event_handler(struct ib_event *ev, void *context)
{
	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
	ib_event_rec_t event_rec;
	struct mthca_cq *cq_p;

	// prepare parameters
	event_rec.type = ev->event;
	cq_p = (struct mthca_cq *)ev->element.cq;
	event_rec.context = cq_p->cq_context;

	// call the user callback
	if (hob_p)
		(hob_p->async_cb_p)(&event_rec);
	else {
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
	}
}
ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps)
{
#define MAP_QPS(val1,val2) case val1: ib_qps = val2; break
	ib_qp_state_t ib_qps;
	switch (qps) {
		MAP_QPS( IBQPS_RESET, IB_QPS_RESET );
		MAP_QPS( IBQPS_INIT, IB_QPS_INIT );
		MAP_QPS( IBQPS_RTR, IB_QPS_RTR );
		MAP_QPS( IBQPS_RTS, IB_QPS_RTS );
		MAP_QPS( IBQPS_SQD, IB_QPS_SQD );
		MAP_QPS( IBQPS_SQE, IB_QPS_SQERR );
		MAP_QPS( IBQPS_ERR, IB_QPS_ERROR );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped MTHCA qp_state %d\n", qps));
			ib_qps = 0xffffffff;
	}
	return ib_qps;
}
enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps)
{
#define MAP_IBQPS(val1,val2) case val1: qps = val2; break
	enum ib_qp_state qps;
	switch (ib_qps) {
		MAP_IBQPS( IB_QPS_RESET, IBQPS_RESET );
		MAP_IBQPS( IB_QPS_INIT, IBQPS_INIT );
		MAP_IBQPS( IB_QPS_RTR, IBQPS_RTR );
		MAP_IBQPS( IB_QPS_RTS, IBQPS_RTS );
		MAP_IBQPS( IB_QPS_SQD, IBQPS_SQD );
		MAP_IBQPS( IB_QPS_SQD_DRAINING, IBQPS_SQD );
		MAP_IBQPS( IB_QPS_SQD_DRAINED, IBQPS_SQD );
		MAP_IBQPS( IB_QPS_SQERR, IBQPS_SQE );
		MAP_IBQPS( IB_QPS_ERROR, IBQPS_ERR );
		default:
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped IBAL qp_state %d\n", ib_qps));
			qps = 0xffffffff;
	}
	return qps;
}
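/*
 * Example (illustrative): the two helpers above are inverse mappings for the
 * states IBAL can request, e.g.
 *
 *     mlnx_qps_from_ibal( IB_QPS_RTS )  -> IBQPS_RTS
 *     mlnx_qps_to_ibal( IBQPS_SQE )     -> IB_QPS_SQERR
 *
 * Note that the three IBAL SQD variants (IB_QPS_SQD, IB_QPS_SQD_DRAINING,
 * IB_QPS_SQD_DRAINED) all collapse onto the single MTHCA state IBQPS_SQD, so
 * the round trip is not one-to-one for those states.
 */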
ib_api_status_t
mlnx_conv_qp_modify_attr(
	IN		const	struct ib_qp *ib_qp_p,
	IN				ib_qp_type_t qp_type,
	IN		const	ib_qp_mod_t *modify_attr_p,
	OUT			struct ib_qp_attr *qp_attr_p,
	OUT			int *qp_attr_mask_p
	)
{
	int err;
	ib_api_status_t status = IB_SUCCESS;
	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;

	RtlZeroMemory( qp_attr_p, sizeof *qp_attr_p );
	*qp_attr_mask_p = IB_QP_STATE;
	qp_attr_p->qp_state = mlnx_qps_from_ibal( modify_attr_p->req_state );

	// skipped cases
	if (qp_p->state == IBQPS_RESET && modify_attr_p->req_state != IB_QPS_INIT)
		return IB_NOT_DONE;

	switch (modify_attr_p->req_state) {
	case IB_QPS_RESET:
	case IB_QPS_ERROR:
	case IB_QPS_SQERR:
	case IB_QPS_TIME_WAIT:
		break;

	case IB_QPS_INIT:

		switch (qp_type) {
		case IB_QPT_RELIABLE_CONN:
		case IB_QPT_UNRELIABLE_CONN:
			*qp_attr_mask_p |= IB_QP_PORT | IB_QP_PKEY_INDEX |IB_QP_ACCESS_FLAGS;
			qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);
			break;
		case IB_QPT_UNRELIABLE_DGRM:
		default:
			*qp_attr_mask_p |= IB_QP_PORT | IB_QP_QKEY | IB_QP_PKEY_INDEX ;
			qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.init.qkey);
			break;
		}

		// IB_QP_PORT
		qp_attr_p->port_num = modify_attr_p->state.init.primary_port;

		// IB_QP_PKEY_INDEX
		qp_attr_p->pkey_index = modify_attr_p->state.init.pkey_index;

		break;

	case IB_QPS_RTR:
		/* modifying the WQE depth is not supported */
		if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||
			modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH ) {
			status = IB_UNSUPPORTED;
			break;
		}

		switch (qp_type) {
		case IB_QPT_RELIABLE_CONN:
			*qp_attr_mask_p |= /* required flags */
				IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |
				IB_QP_AV |IB_QP_PATH_MTU | IB_QP_MIN_RNR_TIMER;

			// IB_QP_DEST_QPN
			qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);

			// IB_QP_RQ_PSN
			qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);

			// IB_QP_MAX_DEST_RD_ATOMIC
			qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rtr.resp_res;

			// IB_QP_AV, IB_QP_PATH_MTU: Convert primary RC AV (mandatory)
			err = mlnx_conv_ibal_av(ib_qp_p->device,
				&modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);
			if (err) {
				status = IB_ERROR;
				break;
			}
			qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU
			qp_attr_p->timeout = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout;
			qp_attr_p->retry_cnt = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt;
			qp_attr_p->rnr_retry = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt;

			// IB_QP_MIN_RNR_TIMER, required in RTR, optional in RTS.
			qp_attr_p->min_rnr_timer = modify_attr_p->state.rtr.rnr_nak_timeout;

			// IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
				*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flag */
				qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rtr.access_ctrl);
			}

			// IB_QP_ALT_PATH: Convert alternate RC AV
			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
				*qp_attr_mask_p |= IB_QP_ALT_PATH; /* required flag */
				err = mlnx_conv_ibal_av(ib_qp_p->device,
					&modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}
				qp_attr_p->alt_timeout = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
			}

			// IB_QP_PKEY_INDEX
			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
				qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
			}
			break;

		case IB_QPT_UNRELIABLE_CONN:
			*qp_attr_mask_p |= /* required flags */
				IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_AV | IB_QP_PATH_MTU;

			// IB_QP_DEST_QPN
			qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);

			// IB_QP_RQ_PSN
			qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);

			// IB_QP_PATH_MTU
			qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu;

			// IB_QP_AV: Convert primary AV (mandatory)
			err = mlnx_conv_ibal_av(ib_qp_p->device,
				&modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);
			if (err) {
				status = IB_ERROR;
				break;
			}

			// IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
				*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flag */
				qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rtr.access_ctrl);
			}

			// IB_QP_ALT_PATH: Convert alternate RC AV
			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
				*qp_attr_mask_p |= IB_QP_ALT_PATH; /* required flag */
				err = mlnx_conv_ibal_av(ib_qp_p->device,
					&modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}
			}

			// IB_QP_PKEY_INDEX
			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
				qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
			}
			break;

		case IB_QPT_UNRELIABLE_DGRM:
		default:
			// IB_QP_PKEY_INDEX
			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
				qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
			}

			// IB_QP_QKEY
			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_QKEY) {
				*qp_attr_mask_p |= IB_QP_QKEY;
				qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.rtr.qkey);
			}
			break;
		}
		break;

	case IB_QPS_RTS:
		/* modifying the WQE depth is not supported */
		if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
			modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
		{
			status = IB_UNSUPPORTED;
			break;
		}

		switch (qp_type) {
		case IB_QPT_RELIABLE_CONN:
			if (qp_p->state != IBQPS_RTS)
				*qp_attr_mask_p |= /* required flags */
					IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC | IB_QP_TIMEOUT |
					IB_QP_RETRY_CNT |IB_QP_RNR_RETRY;

			// IB_QP_MAX_QP_RD_ATOMIC
			qp_attr_p->max_rd_atomic = modify_attr_p->state.rts.init_depth;

			// IB_QP_TIMEOUT
			qp_attr_p->timeout = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv

			// IB_QP_RETRY_CNT
			qp_attr_p->retry_cnt = modify_attr_p->state.rts.retry_cnt;

			// IB_QP_RNR_RETRY
			qp_attr_p->rnr_retry = modify_attr_p->state.rts.rnr_retry_cnt;

			// IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
				*qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC;
				qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;
			}

#ifdef WIN_TO_BE_REMOVED
			//TODO: do we need that ?
			// Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

			// IB_QP_PKEY_INDEX
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_PKEY) {
				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
				qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;
			}
#endif

			// IB_QP_MIN_RNR_TIMER
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {
				*qp_attr_mask_p |= IB_QP_MIN_RNR_TIMER;
				qp_attr_p->min_rnr_timer = modify_attr_p->state.rts.rnr_nak_timeout;
			}

			// IB_QP_PATH_MIG_STATE
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_APM_STATE) {
				*qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;
				qp_attr_p->path_mig_state = modify_attr_p->state.rts.apm_state;
			}

			// IB_QP_ACCESS_FLAGS
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
				*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flags */
				qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rts.access_ctrl);
			}

			// IB_QP_ALT_PATH: Convert alternate RC AV
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
				*qp_attr_mask_p |= IB_QP_ALT_PATH; /* optional flag */
				err = mlnx_conv_ibal_av(ib_qp_p->device,
					&modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}
				qp_attr_p->alt_timeout = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv
			}
			break;

		case IB_QPT_UNRELIABLE_CONN:
			if (qp_p->state != IBQPS_RTS)
				*qp_attr_mask_p |= /* required flags */
					IB_QP_SQ_PSN;

			// IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
				*qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC;
				qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;
			}

#ifdef WIN_TO_BE_REMOVED
			//TODO: do we need that ?
			// Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.

			// IB_QP_PKEY_INDEX
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_PKEY) {
				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
				qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;
			}
#endif

			// IB_QP_PATH_MIG_STATE
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_APM_STATE) {
				*qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;
				qp_attr_p->path_mig_state = modify_attr_p->state.rts.apm_state;
			}

			// IB_QP_ACCESS_FLAGS
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
				*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flags */
				qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rts.access_ctrl);
			}

			// IB_QP_ALT_PATH: Convert alternate RC AV
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
				*qp_attr_mask_p |= IB_QP_ALT_PATH; /* optional flag */
				err = mlnx_conv_ibal_av(ib_qp_p->device,
					&modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);
				if (err) {
					status = IB_ERROR;
					break;
				}
			}
			break;

		case IB_QPT_UNRELIABLE_DGRM:
		default:
			if (qp_p->state != IBQPS_RTS)
				*qp_attr_mask_p |= /* required flags */
					IB_QP_SQ_PSN;

			// IB_QP_QKEY
			if (modify_attr_p->state.rts.opts & IB_MOD_QP_QKEY) {
				*qp_attr_mask_p |= IB_QP_QKEY;
				qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.rts.qkey);
			}
			break;
		}

		// IB_QP_SQ_PSN: common for all
		qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);
		//NB: IB_QP_CUR_STATE flag is not provisioned by IBAL
		break;

	case IB_QPS_SQD:
	case IB_QPS_SQD_DRAINING:
	case IB_QPS_SQD_DRAINED:
		*qp_attr_mask_p |= IB_QP_EN_SQD_ASYNC_NOTIFY;
		qp_attr_p->en_sqd_async_notify = (u8)modify_attr_p->state.sqd.sqd_event;
		HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM ,("IB_QP_EN_SQD_ASYNC_NOTIFY seems to be unsupported\n"));
		break;

	default:
		//NB: is this an error case and do we need this message ? What about returning an error ?
		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped qp_state %d\n", modify_attr_p->req_state));
		break;
	}

	return status;
}
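/*
 * Example (illustrative): for an RC QP being taken RESET->INIT, the code above
 * builds *qp_attr_mask_p = IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
 * IB_QP_ACCESS_FLAGS, with qp_attr_p->qp_state = IBQPS_INIT and the port,
 * pkey_index and access flags taken from modify_attr_p->state.init; the mask
 * and attribute structure are then ready for the low-level modify-QP verb.
 */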
ib_api_status_t
mlnx_conv_ibal_av(
	IN		const	struct ib_device *ib_dev_p,
	IN		const	ib_av_attr_t *ibal_av_p,
	OUT			struct ib_ah_attr *ah_attr_p)
{
	int err = 0;
	u8 port_num;
	u16 gid_index;

	ah_attr_p->port_num = ibal_av_p->port_num;
	ah_attr_p->sl = ibal_av_p->sl;
	ah_attr_p->dlid = cl_ntoh16(ibal_av_p->dlid);
	//TODO: how is static_rate coded ?
	ah_attr_p->static_rate =
		(ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);
	ah_attr_p->src_path_bits = ibal_av_p->path_bits; // PATH:

	/* For global destination or Multicast address:*/
	if (ibal_av_p->grh_valid)
	{
		ah_attr_p->ah_flags |= IB_AH_GRH;
		ah_attr_p->grh.hop_limit = ibal_av_p->grh.hop_limit;
		ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
			&ah_attr_p->grh.traffic_class, &ah_attr_p->grh.flow_label );
		err = ib_find_cached_gid((struct ib_device *)ib_dev_p,
			(union ib_gid *)ibal_av_p->grh.src_gid.raw, &port_num, &gid_index);
		if (err) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));
			gid_index = 0;
		}
		else if (port_num != ah_attr_p->port_num) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid returned wrong port_num %u (Expected - %u). Using the expected.\n",
				(u32)port_num, (u32)ah_attr_p->port_num));
		}
		ah_attr_p->grh.sgid_index = (u8)gid_index;
		RtlCopyMemory(ah_attr_p->grh.dgid.raw, ibal_av_p->grh.dest_gid.raw, sizeof(ah_attr_p->grh.dgid));
	}

	return IB_SUCCESS;
}
ib_api_status_t
mlnx_conv_mthca_av(
	IN		const	struct ib_ah *ib_ah_p,
	OUT			ib_av_attr_t *ibal_av_p)
{
	int err;
	struct ib_ud_header header;
	struct mthca_ah *ah_p = (struct mthca_ah *)ib_ah_p;
	struct ib_device *ib_dev_p = ib_ah_p->pd->device;
	struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;

	err = mthca_read_ah( dev_p, ah_p, &header);
	if (err)
		return IB_ERROR;

	// common part
	ibal_av_p->sl = header.lrh.service_level;
	mthca_get_av_params(ah_p, &ibal_av_p->port_num,
		&ibal_av_p->dlid, &ibal_av_p->static_rate, &ibal_av_p->path_bits );

	// GRH
	ibal_av_p->grh_valid = header.grh_present;
	if (ibal_av_p->grh_valid) {
		ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow(
			header.grh.ip_version, header.grh.traffic_class, header.grh.flow_label );
		ibal_av_p->grh.hop_limit = header.grh.hop_limit;
		RtlCopyMemory(ibal_av_p->grh.src_gid.raw,
			header.grh.source_gid.raw, sizeof(ibal_av_p->grh.src_gid));
		RtlCopyMemory(ibal_av_p->grh.dest_gid.raw,
			header.grh.destination_gid.raw, sizeof(ibal_av_p->grh.dest_gid));
	}

	//TODO: don't know how to fill conn. Note that the previous version didn't fill it either.

	return IB_SUCCESS;
}
void
mlnx_modify_ah(
	IN		const	struct ib_ah *ib_ah_p,
	IN		const	struct ib_ah_attr *ah_attr_p)
{
	struct ib_device *ib_dev_p = ib_ah_p->pd->device;
	struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;

	mthca_set_av_params(dev_p, (struct mthca_ah *)ib_ah_p, (struct ib_ah_attr *)ah_attr_p );
}