2 * Copyright (c) 2008 Microsoft Corporation. All rights reserved.
\r
4 * This software is available to you under the OpenIB.org BSD license
\r
7 * Redistribution and use in source and binary forms, with or
\r
8 * without modification, are permitted provided that the following
\r
9 * conditions are met:
\r
11 * - Redistributions of source code must retain the above
\r
12 * copyright notice, this list of conditions and the following
\r
15 * - Redistributions in binary form must reproduce the above
\r
16 * copyright notice, this list of conditions and the following
\r
17 * disclaimer in the documentation and/or other materials
\r
18 * provided with the distribution.
\r
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
\r
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
\r
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
\r
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
\r
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
\r
32 #include "NdEndpoint.h"
\r
34 #include "NdAdapter.h"
\r
36 #include "NdListen.h"
\r
39 #pragma warning( push, 3 )
\r
40 #include "winternl.h"
\r
41 #pragma warning( pop )
\r
42 #include "nddebug.h"
\r
44 extern uint32_t g_nd_max_inline_size;
\r
46 #if defined(EVENT_TRACING)
\r
50 #include "NdEndpoint.tmh"
\r
54 dbg_data g = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
\r
59 #define SIZE_MAX _UI64_MAX
\r
61 #define SIZE_MAX UINT_MAX
\r
65 namespace NetworkDirect
\r
68 ///////////////////////////////////////////////////////////////////////////////
\r
70 // HPC Pack 2008 Beta 2 SPI
\r
72 ///////////////////////////////////////////////////////////////////////////////
\r
// Constructor / destructor fragment.
// NOTE(review): this chunk is a lossy extract — interior lines are elided
// (original numbering jumps 75 -> 82 -> 88), so initializer list and dtor
// body are mostly missing. The visible dtor line releases the parent
// adapter reference taken in Initialize().
75 CEndpoint::CEndpoint(void) :
\r
82 CEndpoint::~CEndpoint(void)
\r
88 m_pParent->Release();
\r
// Initialize — first-phase setup of the endpoint (fragmentary extract;
// original line numbers show elided interior lines, e.g. 129 -> 143).
// Visible behavior: validates the read limits, takes a reference on the
// parent adapter, asserts the vendor verb table, creates and queries the
// QP, then moves it to INIT so receives can be posted.
91 HRESULT CEndpoint::Initialize(
\r
92 __in CAdapter* pParent,
\r
93 __in CCq* pInboundCq,
\r
94 __in CCq* pOutboundCq,
\r
95 __in SIZE_T nInboundEntries,
\r
96 __in SIZE_T nOutboundEntries,
\r
97 __in SIZE_T nInboundSge,
\r
98 __in SIZE_T nOutboundSge,
\r
99 __in SIZE_T InboundReadLimit,
\r
100 __in SIZE_T OutboundReadLimit,
\r
101 __out_opt SIZE_T* pMaxInlineData
\r
104 ND_ENTER( ND_DBG_NDI );
\r
// Read limits are later narrowed to UINT8 (m_Ird/m_Ord), hence UCHAR_MAX.
106 if( InboundReadLimit > UCHAR_MAX )
\r
107 return ND_INVALID_PARAMETER_8;
\r
109 if( OutboundReadLimit > UCHAR_MAX )
\r
110 return ND_INVALID_PARAMETER_9;
\r
// Hold a reference on the adapter for the endpoint's lifetime
// (released in the destructor).
112 m_pParent = pParent;
\r
113 m_pParent->AddRef();
\r
// Assert that the vendor (UVP) verb table provides the entry points this
// endpoint relies on. bind_mw is intentionally commented out — MW binding
// is simulated elsewhere in this file.
116 m_pParent->m_Ifc.user_verbs.pre_create_qp != NULL ||
\r
117 m_pParent->m_Ifc.user_verbs.post_create_qp != NULL ||
\r
118 m_pParent->m_Ifc.user_verbs.nd_modify_qp != NULL ||
\r
119 m_pParent->m_Ifc.user_verbs.nd_get_qp_state != NULL ||
\r
120 m_pParent->m_Ifc.user_verbs.pre_destroy_qp != NULL ||
\r
121 m_pParent->m_Ifc.user_verbs.post_destroy_qp != NULL ||
\r
122 m_pParent->m_Ifc.user_verbs.post_query_qp != NULL ||
\r
123 m_pParent->m_Ifc.user_verbs.post_send != NULL ||
\r
124 m_pParent->m_Ifc.user_verbs.post_recv != NULL /*||
\r
125 m_pParent->m_Ifc.user_verbs.bind_mw != NULL*/ );
\r
// Start from the globally configured inline limit; refined below from the
// actual QP attributes returned by QueryQp.
127 m_MaxInlineSize = g_nd_max_inline_size;
\r
129 HRESULT hr = CreateQp(
\r
// (call arguments elided in this extract)
143 ib_qp_attr_t qp_attr;
\r
144 hr = QueryQp(&qp_attr);
\r
145 if( FAILED( hr ) ) {
\r
// Use the HCA-reported inline limit rather than the global default.
150 m_MaxInlineSize = (UINT32)qp_attr.sq_max_inline;
\r
153 m_Ird = (UINT8)InboundReadLimit;
\r
154 m_Ord = (UINT8)OutboundReadLimit;
\r
156 // Move the QP to the INIT state so users can post receives.
\r
157 hr = ModifyQp( IB_QPS_INIT );
\r
161 if( SUCCEEDED( hr ) && pMaxInlineData != NULL )
\r
162 *pMaxInlineData = m_MaxInlineSize;
\r
// Create — static-style factory (fragment): allocates a CEndpoint and
// delegates to Initialize(). Returns ND_NO_MEMORY when allocation fails.
// NOTE(review): the Initialize() argument list and the success path that
// stores *ppEndpoint are elided in this extract.
167 HRESULT CEndpoint::Create(
\r
168 __in CAdapter* pParent,
\r
169 __in CCq* pInboundCq,
\r
170 __in CCq* pOutboundCq,
\r
171 __in SIZE_T nInboundEntries,
\r
172 __in SIZE_T nOutboundEntries,
\r
173 __in SIZE_T nInboundSge,
\r
174 __in SIZE_T nOutboundSge,
\r
175 __in SIZE_T InboundReadLimit,
\r
176 __in SIZE_T OutboundReadLimit,
\r
177 __out_opt SIZE_T* pMaxInlineData,
\r
178 __out INDEndpoint** ppEndpoint
\r
181 CEndpoint* pEp = new CEndpoint();
\r
183 return ND_NO_MEMORY;
\r
185 HRESULT hr = pEp->Initialize(
\r
// Standard IUnknown plumbing (fragment): QueryInterface answers for
// IID_IUnknown and IID_INDEndpoint, otherwise E_NOINTERFACE; AddRef and
// Release use interlocked ops on m_nRef for thread-safe ref counting.
208 HRESULT CEndpoint::QueryInterface(
\r
213 if( IsEqualIID( riid, IID_IUnknown ) )
\r
219 if( IsEqualIID( riid, IID_INDEndpoint ) )
\r
225 return E_NOINTERFACE;
\r
228 ULONG CEndpoint::AddRef(void)
\r
230 return InterlockedIncrement( &m_nRef );
\r
233 ULONG CEndpoint::Release(void)
\r
235 ULONG ref = InterlockedDecrement( &m_nRef );
\r
242 // *** INDEndpoint methods ***
\r
// Flush — transitions the QP to the ERROR state, which causes outstanding
// work requests to complete with a flush status.
243 HRESULT CEndpoint::Flush(void)
\r
245 return ModifyQp( IB_QPS_ERROR );
\r
// Request-batching hooks (bodies elided/empty in this extract).
248 void CEndpoint::StartRequestBatch(void)
\r
253 void CEndpoint::SubmitRequestBatch(void)
\r
// Send — posts a send work request built from the caller's SGE list.
// Uses a small stack array for <= 4 SGEs, heap-allocates otherwise.
// Flags map to IB send options (signaled / fence / solicited), and small
// payloads (<= m_MaxInlineSize) are sent inline.
// NOTE(review): fragmentary — the wr declaration, cleanup of pDs, and the
// switch(status) header are elided (original numbering gaps).
258 HRESULT CEndpoint::Send(
\r
259 __out ND_RESULT* pResult,
\r
260 __in_ecount(nSge) const ND_SGE* pSgl,
\r
266 ib_local_ds_t* pDs;
\r
267 ib_local_ds_t ds[4];
\r
269 if( nSge > UINT_MAX )
\r
270 return ND_DATA_OVERRUN;
\r
271 else if( nSge <= 4 )
\r
275 pDs = new ib_local_ds_t[nSge];
\r
277 return ND_NO_MEMORY;
\r
// Translate each ND_SGE into an IB local data segment.
280 pResult->BytesTransferred = 0;
\r
281 for( SIZE_T i = 0; i < nSge; i++ )
\r
283 pDs[i].vaddr = (ULONG_PTR)pSgl[i].pAddr;
\r
284 if( pSgl[i].Length > UINT_MAX )
\r
288 return ND_BUFFER_OVERFLOW;
\r
290 pDs[i].length = (uint32_t)pSgl[i].Length;
\r
291 pDs[i].lkey = ((CMr*)pSgl[i].hMr)->mr_ioctl.out.lkey;
\r
293 // Send completions don't include the length. It's going to
\r
294 // be all or nothing, so store it now and we can reset if the
\r
296 pResult->BytesTransferred += pSgl[i].Length;
\r
300 wr.wr_id = (ULONG_PTR)pResult;
\r
301 wr.wr_type = WR_SEND;
\r
// Inline-eligible payloads avoid a DMA of the source buffers.
302 if ( pResult->BytesTransferred <= m_MaxInlineSize )
\r
303 wr.send_opt = IB_SEND_OPT_INLINE;
\r
307 if( !(Flags & ND_OP_FLAG_SILENT_SUCCESS) )
\r
308 wr.send_opt |= IB_SEND_OPT_SIGNALED;
\r
309 if( Flags & ND_OP_FLAG_READ_FENCE )
\r
310 wr.send_opt |= IB_SEND_OPT_FENCE;
\r
311 if( Flags & ND_OP_FLAG_SEND_AND_SOLICIT_EVENT )
\r
312 wr.send_opt |= IB_SEND_OPT_SOLICITED;
\r
313 wr.num_ds = (uint32_t)nSge;
\r
// Hand the work request to the vendor library (kernel bypass path).
316 ib_api_status_t status =
\r
317 m_pParent->m_Ifc.user_verbs.post_send( m_uQp, &wr, NULL );
\r
322 CL_ASSERT( nSge || pSgl == NULL );
\r
// Debug statistics accumulator (see dbg_data g at file scope).
329 g.snd_bytes += pSgl[0].Length;
\r
// Map IB status codes to ND HRESULTs.
342 case IB_INSUFFICIENT_RESOURCES:
\r
343 return ND_NO_MORE_ENTRIES;
\r
344 case IB_INVALID_MAX_SGE:
\r
345 return ND_DATA_OVERRUN;
\r
346 case IB_INVALID_QP_STATE:
\r
347 return ND_CONNECTION_INVALID;
\r
349 return ND_UNSUCCESSFUL;
\r
// SendAndInvalidate — like Send(), but additionally carries the remote
// MW token as immediate data. Since MWs are simulated in this provider,
// "invalidate" is emulated: the peer looks up the MW by the RKey carried
// in the immediate data. Fragmentary extract (wr declaration, pDs
// cleanup, and switch(status) header elided).
353 HRESULT CEndpoint::SendAndInvalidate(
\r
354 __out ND_RESULT* pResult,
\r
355 __in_ecount(nSge) const ND_SGE* pSgl,
\r
357 __in const ND_MW_DESCRIPTOR* pRemoteMwDescriptor,
\r
361 ND_ENTER( ND_DBG_NDI );
\r
364 ib_local_ds_t* pDs;
\r
366 if( nSge > UINT_MAX )
\r
367 return ND_DATA_OVERRUN;
\r
369 pDs = new ib_local_ds_t[nSge];
\r
371 return ND_NO_MEMORY;
\r
// Translate ND SGEs to IB data segments, accumulating the total length.
373 pResult->BytesTransferred = 0;
\r
374 for( SIZE_T i = 0; i < nSge; i++ )
\r
376 pDs[i].vaddr = (ULONG_PTR)pSgl[i].pAddr;
\r
377 if( pSgl[i].Length > UINT_MAX )
\r
380 return ND_BUFFER_OVERFLOW;
\r
382 pDs[i].length = (uint32_t)pSgl[i].Length;
\r
383 pDs[i].lkey = ((CMr*)pSgl[i].hMr)->mr_ioctl.out.lkey;
\r
385 // Send completions don't include the length. It's going to
\r
386 // be all or nothing, so store it now and we can reset if the
\r
388 pResult->BytesTransferred += pSgl[i].Length;
\r
392 wr.wr_id = (ULONG_PTR)pResult;
\r
393 wr.wr_type = WR_SEND;
\r
394 if ( pResult->BytesTransferred <= m_MaxInlineSize )
\r
395 wr.send_opt = IB_SEND_OPT_INLINE;
\r
398 // We simulate invalidate operations (since we simulate MW use). We
\r
399 // put the RKey in the immediate data, the recipient will do the
\r
400 // lookup of the MW based on that (as they would with a regular
\r
401 // invalidate request).
\r
402 wr.send_opt |= IB_SEND_OPT_IMMEDIATE;
\r
403 if( !(Flags & ND_OP_FLAG_SILENT_SUCCESS) )
\r
404 wr.send_opt |= IB_SEND_OPT_SIGNALED;
\r
405 if( Flags & ND_OP_FLAG_READ_FENCE )
\r
406 wr.send_opt |= IB_SEND_OPT_FENCE;
\r
407 if( Flags & ND_OP_FLAG_SEND_AND_SOLICIT_EVENT )
\r
408 wr.send_opt |= IB_SEND_OPT_SOLICITED;
\r
409 wr.num_ds = (uint32_t)nSge;
\r
411 // Put the RKey in the immeditate data.
\r
412 wr.immediate_data = pRemoteMwDescriptor->Token;
\r
414 ib_api_status_t status =
\r
415 m_pParent->m_Ifc.user_verbs.post_send( m_uQp, &wr, NULL );
\r
// Map IB status codes to ND HRESULTs.
422 case IB_INSUFFICIENT_RESOURCES:
\r
423 return ND_NO_MORE_ENTRIES;
\r
424 case IB_INVALID_MAX_SGE:
\r
425 return ND_DATA_OVERRUN;
\r
426 case IB_INVALID_QP_STATE:
\r
427 return ND_CONNECTION_INVALID;
\r
429 return ND_UNSUCCESSFUL;
\r
// Receive — posts a receive work request from the caller's SGE list.
// Mirrors Send(): stack array for <= 4 SGEs, heap otherwise. Unlike
// Send(), BytesTransferred is not accumulated here — receive completions
// report the length. Fragmentary extract (wr declaration, pDs cleanup,
// switch(status) header elided).
433 HRESULT CEndpoint::Receive(
\r
434 __out ND_RESULT* pResult,
\r
435 __in_ecount(nSge) const ND_SGE* pSgl,
\r
// Verbose debug trace every 1000 posted receives.
440 if (!(++g.rcv_cnt % 1000))
\r
441 ND_PRINT( TRACE_LEVEL_VERBOSE, ND_DBG_NDI,
\r
442 ("==> %s, cnt %I64d, rcv %I64d:%I64d:%I64d:%I64d\n",
\r
443 __FUNCTION__, g.rcv_cnt, g.rcv_pkts, g.rcv_bytes, g.rcv_pkts_err, g.rcv_pkts_zero ));
\r
446 ib_local_ds_t* pDs;
\r
447 ib_local_ds_t ds[4];
\r
449 if( nSge > UINT_MAX )
\r
450 return ND_DATA_OVERRUN;
\r
451 else if( nSge <= 4 )
\r
455 pDs = new ib_local_ds_t[nSge];
\r
457 return ND_NO_MEMORY;
\r
460 for( SIZE_T i = 0; i < nSge; i++ )
\r
462 pDs[i].vaddr = (ULONG_PTR)pSgl[i].pAddr;
\r
463 if( pSgl[i].Length > UINT_MAX )
\r
467 return ND_BUFFER_OVERFLOW;
\r
469 pDs[i].length = (uint32_t)pSgl[i].Length;
\r
470 pDs[i].lkey = ((CMr*)pSgl[i].hMr)->mr_ioctl.out.lkey;
\r
474 wr.wr_id = (ULONG_PTR)pResult;
\r
475 wr.num_ds = (uint32_t)nSge;
\r
478 ib_api_status_t status =
\r
479 m_pParent->m_Ifc.user_verbs.post_recv( m_uQp, &wr, NULL );
\r
485 CL_ASSERT( nSge || pSgl == NULL );
\r
// Debug statistics accumulator.
492 g.rcv_bytes += pSgl[0].Length;
\r
// Map IB status codes to ND HRESULTs.
505 case IB_INSUFFICIENT_RESOURCES:
\r
506 return ND_NO_MORE_ENTRIES;
\r
507 case IB_INVALID_MAX_SGE:
\r
508 return ND_DATA_OVERRUN;
\r
509 case IB_INVALID_QP_STATE:
\r
510 return ND_CONNECTION_INVALID;
\r
512 return ND_UNSUCCESSFUL;
\r
// Bind — "binds" a memory window to a buffer range within an existing MR.
// Because the HCA driver here does not support real MWs, this is a
// workaround: the descriptor is filled with the (byte-swapped) base,
// length and the MR's RKey, and a zero-byte RDMA write is posted so the
// caller still gets a completion. Fragmentary extract (wr declaration
// and switch(status) header elided).
516 HRESULT CEndpoint::Bind(
\r
517 __out ND_RESULT* pResult,
\r
518 __in ND_MR_HANDLE hMr,
\r
519 __in INDMemoryWindow* pMw,
\r
520 __in_bcount(BufferSize) const void* pBuffer,
\r
521 __in SIZE_T BufferSize,
\r
523 __out ND_MW_DESCRIPTOR* pMwDescriptor
\r
526 ND_ENTER( ND_DBG_NDI );
\r
528 UNREFERENCED_PARAMETER( pMw );
\r
529 UNREFERENCED_PARAMETER( Flags );
\r
531 CMr* pMr = ((CMr*)hMr);
\r
// Validate the requested window lies within the registered region.
533 if( pBuffer < pMr->pBase ||
\r
534 pBuffer > pMr->pBase + pMr->Length )
\r
536 return ND_INVALID_PARAMETER_4;
\r
539 if( ((const char*)pBuffer + BufferSize) > (pMr->pBase + pMr->Length) )
\r
541 return ND_INVALID_PARAMETER_5;
\r
544 // Ok, this here is a workaround since the Mellanox HCA driver doesn't
\r
545 // support MWs. This should be pushed into the HCA driver.
\r
// Base/Length are stored byte-swapped (network byte order) in the
// descriptor exchanged with the peer.
546 pMwDescriptor->Base = _byteswap_uint64( (UINT64)(ULONG_PTR)pBuffer );
\r
547 pMwDescriptor->Length = _byteswap_uint64( BufferSize );
\r
548 pMwDescriptor->Token = pMr->mr_ioctl.out.rkey;
\r
550 // Zero-byte RDMA write. Could also be a no-op on the send queue
\r
551 // which would be better, but requires changing the HCA driver.
\r
554 wr.wr_id = (ULONG_PTR)pResult;
\r
555 wr.wr_type = WR_RDMA_WRITE;
\r
556 wr.send_opt = IB_SEND_OPT_SIGNALED;
\r
558 wr.ds_array = NULL;
\r
560 wr.remote_ops.vaddr = 0;
\r
561 wr.remote_ops.rkey = 0;
\r
563 pResult->BytesTransferred = 0;
\r
565 // TODO: Track the MW by rkey, so we can unbind it.
\r
567 ib_api_status_t status =
\r
568 m_pParent->m_Ifc.user_verbs.post_send( m_uQp, &wr, NULL );
\r
// Map IB status codes to ND HRESULTs.
574 case IB_INSUFFICIENT_RESOURCES:
\r
575 return ND_NO_MORE_ENTRIES;
\r
576 case IB_INVALID_QP_STATE:
\r
577 return ND_CONNECTION_INVALID;
\r
579 return ND_UNSUCCESSFUL;
\r
// Invalidate — counterpart to Bind() for the simulated-MW scheme: posts a
// signaled zero-byte RDMA write so the caller receives a completion; no
// actual HCA-side invalidate occurs. Fragmentary extract (wr declaration
// and switch(status) header elided).
583 HRESULT CEndpoint::Invalidate(
\r
584 __out ND_RESULT* pResult,
\r
585 __in INDMemoryWindow* pMw,
\r
589 ND_ENTER( ND_DBG_NDI );
\r
591 UNREFERENCED_PARAMETER( pMw );
\r
592 UNREFERENCED_PARAMETER( Flags );
\r
594 // Zero-byte RDMA write. Could also be a no-op on the send queue
\r
595 // which would be better, but requires changing the HCA driver.
\r
598 wr.wr_id = (ULONG_PTR)pResult;
\r
599 wr.wr_type = WR_RDMA_WRITE;
\r
600 wr.send_opt = IB_SEND_OPT_SIGNALED;
\r
602 wr.ds_array = NULL;
\r
604 wr.remote_ops.vaddr = 0;
\r
605 wr.remote_ops.rkey = 0;
\r
607 pResult->BytesTransferred = 0;
\r
609 ib_api_status_t status =
\r
610 m_pParent->m_Ifc.user_verbs.post_send( m_uQp, &wr, NULL );
\r
615 // TODO: Stop trackign MW
\r
// Map IB status codes to ND HRESULTs.
617 case IB_INSUFFICIENT_RESOURCES:
\r
618 return ND_NO_MORE_ENTRIES;
\r
619 case IB_INVALID_QP_STATE:
\r
620 return ND_CONNECTION_INVALID;
\r
622 return ND_UNSUCCESSFUL;
\r
// Rdma — shared implementation behind Read() and Write(). Builds the SGE
// list (stack array for <= 4 entries), sets inline only for writes small
// enough to fit (reads can never be inline), applies the signal/fence
// flags, and targets the remote address recovered by byte-swapping the
// descriptor's Base. Fragmentary extract (wr declaration, Offset usage,
// pDs cleanup and switch(status) header elided).
626 HRESULT CEndpoint::Rdma(
\r
627 __out ND_RESULT* pResult,
\r
628 __in ib_wr_type_t Type,
\r
629 __in_ecount(nSge) const ND_SGE* pSgl,
\r
631 __in const ND_MW_DESCRIPTOR* pRemoteMwDescriptor,
\r
632 __in ULONGLONG Offset,
\r
636 // ND_ENTER( ND_DBG_NDI );
\r
639 ib_local_ds_t* pDs;
\r
640 ib_local_ds_t ds[4];
\r
642 if( nSge > UINT_MAX )
\r
643 return ND_DATA_OVERRUN;
\r
644 else if( nSge <= 4 )
\r
648 pDs = new ib_local_ds_t[nSge];
\r
650 return ND_NO_MEMORY;
\r
653 pResult->BytesTransferred = 0;
\r
654 for( SIZE_T i = 0; i < nSge; i++ )
\r
656 pDs[i].vaddr = (ULONG_PTR)pSgl[i].pAddr;
\r
657 if( pSgl[i].Length > UINT_MAX )
\r
661 return ND_BUFFER_OVERFLOW;
\r
663 pDs[i].length = (uint32_t)pSgl[i].Length;
\r
664 pDs[i].lkey = ((CMr*)pSgl[i].hMr)->mr_ioctl.out.lkey;
\r
666 //TODO: temporary - a workaround of test bug
\r
// NOTE(review): this negates lengths that alias to negative ints —
// explicitly marked as a temporary test-bug workaround above.
668 if( (int)pSgl[i].Length < 0 )
\r
670 pDs[i].length = 0 - (int)pSgl[i].Length;
\r
672 ND_PRINT( TRACE_LEVEL_VERBOSE, ND_DBG_NDI,
\r
673 ("nSge %d, i %d, Length %#x\n", nSge, i, pSgl[i].Length ));
\r
676 return ND_BUFFER_OVERFLOW;
\r
680 // Send completions don't include the length. It's going to
\r
681 // be all or nothing, so store it now and we can reset if the
\r
683 pResult->BytesTransferred += pSgl[i].Length;
\r
687 wr.wr_id = (ULONG_PTR)pResult;
\r
// Inline is only valid for writes; RDMA reads must DMA into the buffers.
689 if ( (pResult->BytesTransferred <= m_MaxInlineSize) && Type != WR_RDMA_READ)
\r
690 wr.send_opt = IB_SEND_OPT_INLINE;
\r
693 if( !(Flags & ND_OP_FLAG_SILENT_SUCCESS) )
\r
694 wr.send_opt |= IB_SEND_OPT_SIGNALED;
\r
695 if( Flags & ND_OP_FLAG_READ_FENCE )
\r
696 wr.send_opt |= IB_SEND_OPT_FENCE;
\r
697 wr.num_ds = (uint32_t)nSge;
\r
// Undo the byte-swap applied when the descriptor was created in Bind().
700 UINT64 vaddr = _byteswap_uint64( pRemoteMwDescriptor->Base );
\r
702 wr.remote_ops.vaddr = vaddr;
\r
703 wr.remote_ops.rkey = pRemoteMwDescriptor->Token;
\r
705 ib_api_status_t status =
\r
706 m_pParent->m_Ifc.user_verbs.post_send( m_uQp, &wr, NULL );
\r
// Map IB status codes to ND HRESULTs.
715 case IB_INSUFFICIENT_RESOURCES:
\r
716 return ND_NO_MORE_ENTRIES;
\r
717 case IB_INVALID_MAX_SGE:
\r
718 return ND_DATA_OVERRUN;
\r
719 case IB_INVALID_QP_STATE:
\r
720 return ND_CONNECTION_INVALID;
\r
722 return ND_UNSUCCESSFUL;
\r
// Read / Write — thin wrappers that forward to the shared Rdma() helper
// with the appropriate IB work-request type.
726 HRESULT CEndpoint::Read(
\r
727 __out ND_RESULT* pResult,
\r
728 __in_ecount(nSge) const ND_SGE* pSgl,
\r
730 __in const ND_MW_DESCRIPTOR* pRemoteMwDescriptor,
\r
731 __in ULONGLONG Offset,
\r
735 // ND_ENTER( ND_DBG_NDI );
\r
737 return Rdma( pResult, WR_RDMA_READ, pSgl, nSge,
\r
738 pRemoteMwDescriptor, Offset, Flags );
\r
741 HRESULT CEndpoint::Write(
\r
742 __out ND_RESULT* pResult,
\r
743 __in_ecount(nSge) const ND_SGE* pSgl,
\r
745 __in const ND_MW_DESCRIPTOR* pRemoteMwDescriptor,
\r
746 __in ULONGLONG Offset,
\r
750 // ND_ENTER( ND_DBG_NDI );
\r
752 return Rdma( pResult, WR_RDMA_WRITE, pSgl, nSge,
\r
753 pRemoteMwDescriptor, Offset, Flags );
\r
// CreateQp — creates the RC queue pair via the three-phase UVP protocol:
// (1) vendor pre_create_qp with user-mode CQ handles, (2) DeviceIoControl
// into the kernel proxy with kernel handles swapped in, (3) vendor
// post_create_qp with the kernel's result. Errors are mapped back to ND
// parameter-specific HRESULTs. Fragmentary extract (IOCTL code, output
// buffer args, bytes_ret declaration, case IB_SUCCESS label and default
// label are elided).
756 HRESULT CEndpoint::CreateQp(
\r
757 __in CCq* pInboundCq,
\r
758 __in CCq* pOutboundCq,
\r
759 __in SIZE_T nInboundEntries,
\r
760 __in SIZE_T nOutboundEntries,
\r
761 __in SIZE_T nInboundSge,
\r
762 __in SIZE_T nOutboundSge,
\r
763 __in SIZE_T InboundReadLimit,
\r
764 __in SIZE_T OutboundReadLimit,
\r
765 __in SIZE_T MaxInlineData
\r
768 ND_ENTER( ND_DBG_NDI );
\r
// All SIZE_T inputs are narrowed to uint32_t/UINT8 below, so range-check
// them first and report the offending parameter index.
770 if( MaxInlineData > UINT_MAX )
\r
771 return ND_INVALID_PARAMETER_3;
\r
772 if( nInboundEntries > UINT_MAX )
\r
773 return ND_INVALID_PARAMETER_4;
\r
774 if( nOutboundEntries > UINT_MAX )
\r
775 return ND_INVALID_PARAMETER_5;
\r
776 if( nInboundSge > UINT_MAX )
\r
777 return ND_INVALID_PARAMETER_6;
\r
778 if( nOutboundSge > UINT_MAX )
\r
779 return ND_INVALID_PARAMETER_7;
\r
780 if( InboundReadLimit > UCHAR_MAX )
\r
781 return ND_INVALID_PARAMETER_9;
\r
782 if( OutboundReadLimit > UCHAR_MAX )
\r
783 return ND_INVALID_PARAMETER_10;
\r
785 /* Setup the qp_ioctl */
\r
786 ual_create_qp_ioctl_t qp_ioctl;
\r
787 cl_memclr( &qp_ioctl, sizeof(qp_ioctl) );
\r
789 qp_ioctl.in.qp_create.qp_type = IB_QPT_RELIABLE_CONN;
\r
790 qp_ioctl.in.qp_create.sq_depth = (uint32_t)nOutboundEntries;
\r
791 qp_ioctl.in.qp_create.rq_depth = (uint32_t)nInboundEntries;
\r
792 qp_ioctl.in.qp_create.sq_sge = (uint32_t)nOutboundSge;
\r
793 qp_ioctl.in.qp_create.rq_sge = (uint32_t)nInboundSge;
\r
794 qp_ioctl.in.qp_create.sq_max_inline = (uint32_t)MaxInlineData;
\r
795 qp_ioctl.in.qp_create.h_srq = NULL;
\r
796 qp_ioctl.in.qp_create.sq_signaled = FALSE;
\r
798 /* Pre call to the UVP library */
\r
799 CL_ASSERT( m_pParent->m_Ifc.user_verbs.pre_create_qp );
\r
// Phase 1: vendor library sees the user-mode CQ handles.
800 qp_ioctl.in.qp_create.h_sq_cq = pOutboundCq->m_uCq;
\r
801 qp_ioctl.in.qp_create.h_rq_cq = pInboundCq->m_uCq;
\r
802 ib_api_status_t status = m_pParent->m_Ifc.user_verbs.pre_create_qp(
\r
804 &qp_ioctl.in.qp_create,
\r
805 &qp_ioctl.in.umv_buf,
\r
806 (ib_qp_handle_t*)(ULONG_PTR)&m_uQp
\r
808 if( status != IB_SUCCESS )
\r
809 return ND_INSUFFICIENT_RESOURCES;
\r
812 * Convert the handles to KAL handles once again starting
\r
813 * from the input qp attribute
\r
// Phase 2: swap in kernel handles and issue the create IOCTL.
815 qp_ioctl.in.h_pd = m_pParent->m_hPd;
\r
816 qp_ioctl.in.qp_create.h_sq_cq = (ib_cq_handle_t)pOutboundCq->m_hCq;
\r
817 qp_ioctl.in.qp_create.h_rq_cq = (ib_cq_handle_t)pInboundCq->m_hCq;
\r
818 qp_ioctl.in.context = (ULONG_PTR)this;
\r
819 qp_ioctl.in.ev_notify = FALSE;
\r
822 BOOL fSuccess = DeviceIoControl(
\r
823 m_pParent->m_hSync,
\r
826 sizeof(qp_ioctl.in),
\r
828 sizeof(qp_ioctl.out),
\r
// A short or failed IOCTL is treated as a generic IB error so the
// vendor post call still runs for cleanup.
832 if( fSuccess != TRUE || bytes_ret != sizeof(qp_ioctl.out) )
\r
833 qp_ioctl.out.status = IB_ERROR;
\r
835 /* Post uvp call */
\r
836 CL_ASSERT( m_pParent->m_Ifc.user_verbs.post_create_qp );
\r
// Phase 3: let the vendor library finalize (or roll back) its state.
837 m_pParent->m_Ifc.user_verbs.post_create_qp(
\r
839 qp_ioctl.out.status,
\r
840 (ib_qp_handle_t*)(ULONG_PTR)&m_uQp,
\r
841 &qp_ioctl.out.umv_buf
\r
845 switch( qp_ioctl.out.status )
\r
// Success path: record kernel QP handle and QP number.
848 m_hQp = qp_ioctl.out.h_qp;
\r
849 m_Qpn = qp_ioctl.out.attr.num;
\r
850 ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
\r
851 ("Created QP %#I64x, QPn %#x, pd %#I64x, context %p \n",
\r
852 m_hQp, m_Qpn, m_pParent->m_hPd, this ) );
\r
// Blame whichever direction carried the larger (offending) value.
855 case IB_INVALID_MAX_WRS:
\r
856 if( nInboundEntries > nOutboundEntries )
\r
857 return ND_INVALID_PARAMETER_4;
\r
859 return ND_INVALID_PARAMETER_5;
\r
861 case IB_INVALID_MAX_SGE:
\r
862 if( nInboundSge > nOutboundSge )
\r
863 return ND_INVALID_PARAMETER_6;
\r
865 return ND_INVALID_PARAMETER_7;
\r
867 case IB_INSUFFICIENT_MEMORY:
\r
868 return ND_NO_MEMORY;
\r
871 return ND_INSUFFICIENT_RESOURCES;
\r
// DestroyQp — tears the QP down with the same three-phase protocol as
// CreateQp: vendor pre_destroy_qp, destroy IOCTL via the kernel proxy,
// then vendor post_destroy_qp with the kernel's status. Fragmentary
// extract (IOCTL code/output args and bytes_ret declaration elided).
875 void CEndpoint::DestroyQp()
\r
877 ND_ENTER( ND_DBG_NDI );
\r
879 /* Call the uvp pre call if the vendor library provided a valid QP handle */
\r
880 CL_ASSERT( m_pParent->m_Ifc.user_verbs.pre_destroy_qp );
\r
881 m_pParent->m_Ifc.user_verbs.pre_destroy_qp( m_uQp );
\r
883 ual_destroy_qp_ioctl_t qp_ioctl;
\r
884 cl_memclr( &qp_ioctl, sizeof(qp_ioctl) );
\r
885 qp_ioctl.in.h_qp = m_hQp;
\r
887 ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
\r
888 ("Destroy QP %I64x\n", m_hQp) );
\r
891 BOOL fSuccess = DeviceIoControl(
\r
892 m_pParent->m_hSync,
\r
895 sizeof(qp_ioctl.in),
\r
897 sizeof(qp_ioctl.out),
\r
// Treat a failed/short IOCTL as a generic IB error; the vendor post
// call still runs so its user-mode state is cleaned up.
902 if( fSuccess != TRUE || bytes_ret != sizeof(qp_ioctl.out) )
\r
903 qp_ioctl.out.status = IB_ERROR;
\r
905 ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
\r
906 ("Destroyed QP %#I64x, QPn %#x, pd %#I64x, context %p \n",
\r
907 m_hQp, m_Qpn, m_pParent->m_hPd, this ) );
\r
909 /* Call vendor's post_destroy_qp */
\r
910 CL_ASSERT( m_pParent->m_Ifc.user_verbs.post_destroy_qp );
\r
911 m_pParent->m_Ifc.user_verbs.post_destroy_qp(
\r
913 qp_ioctl.out.status
\r
// ModifyQp — transitions the QP to NewState via the ND-specific modify
// IOCTL. The visible INIT-state setup fixes the port, pkey index, a zero
// qkey, and grants local-write + RDMA read/write access. Fragmentary
// extract (the state switch, uvp nd_modify_qp arguments, IOCTL args and
// the success return are elided).
918 HRESULT CEndpoint::ModifyQp(
\r
919 __in ib_qp_state_t NewState )
\r
921 ND_ENTER( ND_DBG_NDI );
\r
923 /* Setup the qp_ioctl */
\r
924 ual_ndi_modify_qp_ioctl_in_t qp_ioctl;
\r
925 cl_memclr( &qp_ioctl, sizeof(qp_ioctl) );
\r
930 qp_ioctl.qp_mod.state.init.primary_port = m_pParent->m_PortNum;
\r
931 qp_ioctl.qp_mod.state.init.qkey = 0;
\r
932 qp_ioctl.qp_mod.state.init.pkey_index = 0;
\r
933 qp_ioctl.qp_mod.state.init.access_ctrl =
\r
934 IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE;
\r
939 qp_ioctl.qp_mod.req_state = NewState;
\r
942 /* Call the uvp ND modify verb */
\r
943 CL_ASSERT( m_pParent->m_Ifc.user_verbs.nd_modify_qp );
\r
947 m_pParent->m_Ifc.user_verbs.nd_modify_qp(
\r
953 qp_ioctl.h_qp = m_hQp;
\r
956 BOOL fSuccess = DeviceIoControl(
\r
957 m_pParent->m_hSync,
\r
966 if( fSuccess != TRUE )
\r
967 return ND_UNSUCCESSFUL;
\r
// QueryQp — reads the QP's current attributes through the usual pre-uvp /
// IOCTL / post-uvp sequence and copies them to *qp_attr on success.
// Unlike the create/destroy paths, pre_query_qp is optional and only
// called when the vendor provides it. Fragmentary extract (IOCTL code,
// output args, bytes_ret declaration, and the success-branch test before
// the final copy are elided).
972 HRESULT CEndpoint::QueryQp(
\r
973 __out ib_qp_attr_t *qp_attr
\r
976 ib_api_status_t status;
\r
978 ND_ENTER( ND_DBG_NDI );
\r
980 ual_query_qp_ioctl_t qp_ioctl;
\r
981 cl_memclr( &qp_ioctl, sizeof(qp_ioctl) );
\r
982 qp_ioctl.in.h_qp = m_hQp;
\r
984 /* Call the uvp pre call if the vendor library provided a valid ca handle */
\r
985 if( m_pParent->m_Ifc.user_verbs.pre_query_qp )
\r
987 /* Pre call to the UVP library */
\r
988 status = m_pParent->m_Ifc.user_verbs.pre_query_qp( m_uQp, &qp_ioctl.in.umv_buf );
\r
989 if( status != IB_SUCCESS )
\r
994 BOOL fSuccess = DeviceIoControl(
\r
995 m_pParent->m_hSync,
\r
998 sizeof(qp_ioctl.in),
\r
1000 sizeof(qp_ioctl.out),
\r
// Short or failed IOCTL becomes IB_ERROR; otherwise adopt the kernel's
// reported status.
1005 if( fSuccess != TRUE || bytes_ret != sizeof(qp_ioctl.out) )
\r
1006 status = IB_ERROR;
\r
1008 status = qp_ioctl.out.status;
\r
1010 /* Call vendor's post_query_qp */
\r
1011 CL_ASSERT( m_pParent->m_Ifc.user_verbs.post_query_qp );
\r
1012 if( m_pParent->m_Ifc.user_verbs.post_query_qp )
\r
1014 m_pParent->m_Ifc.user_verbs.post_query_qp( m_uQp, status,
\r
1015 &qp_ioctl.out.attr, &qp_ioctl.out.umv_buf );
\r
1019 ND_PRINT( TRACE_LEVEL_INFORMATION, ND_DBG_NDI,
\r
1020 ("Queried QP %#I64x, QPn %#x, pd %#I64x, context %p, status %#x \n",
\r
1021 m_hQp, m_Qpn, m_pParent->m_hPd, this, status ) );
\r
// Copy the queried attributes out to the caller.
1026 *qp_attr = qp_ioctl.out.attr;
\r
1030 return ND_UNSUCCESSFUL;
\r