/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* Registers a memory region */

#include "ibspdll.h"

__forceinline boolean_t
__check_mr(
	IN struct memory_reg *p_reg,
	IN ib_access_t acl_mask,
	IN void *start,
	IN size_t len )
{
	return( (p_reg->type.access_ctrl & acl_mask) == acl_mask &&
		start >= p_reg->type.vaddr &&
		((uintn_t)start) + len <=
		((uintn_t)(uint64_t)p_reg->type.vaddr) + p_reg->type.length );
}
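
/*
 * For example (illustrative values only): a cached registration with
 * type.vaddr = 0x10000, type.length = 0x4000 and access_ctrl =
 * (IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ) satisfies a request for the range
 * [0x11000, 0x12000) with acl_mask = IB_AC_RDMA_READ, because both the
 * requested range and the requested access rights are subsets of what was
 * registered.
 */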

/* Find the first registered mr that matches the given region.
 * mem_list is either socket_info->buf_mem_list or socket_info->rdma_mem_list.
 */
struct memory_node *
lookup_partial_mr(
	IN struct ibsp_socket_info *s,
	IN ib_access_t acl_mask,
	IN void *start,
	IN size_t len )
{
	struct memory_node *p_node;
	cl_list_item_t *p_item;

	IBSP_ENTER( IBSP_DBG_MEM );

	cl_spinlock_acquire( &s->port->hca->rdma_mem_list.mutex );

	for( p_item = cl_qlist_head( &s->mr_list );
		p_item != cl_qlist_end( &s->mr_list );
		p_item = cl_qlist_next( p_item ) )
	{
		p_node = PARENT_STRUCT( p_item, struct memory_node, socket_item );

		if( __check_mr( p_node->p_reg, acl_mask, start, len ) )
		{
			cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
			IBSP_EXIT( IBSP_DBG_MEM );
			return p_node;
		}
	}

	cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );

	IBSP_TRACE_EXIT( IBSP_DBG_MEM, ("mr not found\n") );
	return NULL;
}

/* Registers a memory region. The memory region might be cached.
 * mem_list is either socket_info->buf_mem_list or hca->rdma_mem_list.
 * An illustrative usage sketch follows the function body below.
 */
struct memory_node *
ibsp_reg_mem(
	IN struct ibsp_socket_info *s,
	IN ib_pd_handle_t pd,
	IN void *start,
	IN size_t len,
	IN ib_access_t access_ctrl,
	OUT LPINT lpErrno )
{
	struct memory_node *p_node;
	struct memory_reg *p_reg;
	cl_list_item_t *p_item;
	ib_api_status_t status;

	IBSP_ENTER( IBSP_DBG_MEM );

	CL_ASSERT( start != NULL );
	CL_ASSERT( len != 0 );
	CL_ASSERT( (access_ctrl & ~(IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE)) ==
		0 );

	/* Optimistically allocate a tracking structure. */
	p_node = HeapAlloc( g_ibsp.heap, 0, sizeof(struct memory_node) );
	if( !p_node )
	{
		IBSP_ERROR_EXIT(
			("AllocateOverlappedBuf:HeapAlloc() failed: %d\n",
			GetLastError()) );
		*lpErrno = WSAENOBUFS;
		return NULL;
	}

	/* First, try to find a suitable MR */
	cl_spinlock_acquire( &s->port->hca->rdma_mem_list.mutex );

	/* Find the first registered mr that matches the given region. */
	for( p_item = cl_qlist_head( &s->port->hca->rdma_mem_list.list );
		p_item != cl_qlist_end( &s->port->hca->rdma_mem_list.list );
		p_item = cl_qlist_next( p_item ) )
	{
		p_reg = PARENT_STRUCT(p_item, struct memory_reg, item);

		if( __check_mr( p_reg, access_ctrl, start, len ) )
		{
			/* Cache hit: reuse the existing registration. */
			p_node->p_reg = p_reg;
			p_node->s = s;
			cl_qlist_insert_tail( &p_reg->node_list, &p_node->mr_item );
			cl_qlist_insert_head( &s->mr_list, &p_node->socket_item );
			cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
			IBSP_EXIT( IBSP_DBG_MEM );
			return p_node;
		}
	}

	/* No corresponding MR has been found. Create a new one. */
	p_reg = HeapAlloc( g_ibsp.heap, 0, sizeof(struct memory_reg) );
	if( !p_reg )
	{
		IBSP_ERROR_EXIT(
			("AllocateOverlappedBuf:HeapAlloc() failed: %d\n",
			GetLastError()) );
		cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
		HeapFree( g_ibsp.heap, 0, p_node );
		*lpErrno = WSAENOBUFS;
		return NULL;
	}

	/* The node is not initialized yet. All the parameters given are
	 * supposed to be valid so we don't check them. */
	cl_qlist_init( &p_reg->node_list );
	p_reg->type.vaddr = start;
	p_reg->type.length = len;
	p_reg->type.access_ctrl = access_ctrl;

	IBSP_TRACE2( IBSP_DBG_MEM, ("pinning memory node %p\n", p_node) );
	status = ib_reg_mem(
		pd, &p_reg->type, &p_reg->lkey, &p_reg->rkey, &p_reg->mr_handle );
	if( status != IB_SUCCESS )
	{
		cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
		HeapFree( g_ibsp.heap, 0, p_reg );
		HeapFree( g_ibsp.heap, 0, p_node );
		IBSP_ERROR_EXIT(
			("ib_reg_mem returned %s\n", ib_get_err_str(status)) );
		*lpErrno = WSAEFAULT;
		return NULL;
	}

	STAT_INC( mr_num );

	p_node->p_reg = p_reg;
	p_node->s = s;

	/* Link to the list of nodes. */
	cl_qlist_insert_head( &s->port->hca->rdma_mem_list.list, &p_reg->item );
	cl_qlist_insert_head( &s->mr_list, &p_node->socket_item );
	cl_qlist_insert_tail( &p_reg->node_list, &p_node->mr_item );
	cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );

	IBSP_EXIT( IBSP_DBG_MEM );
	return p_node;
}
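
/*
 * Illustrative usage sketch (not built): how a caller might register a
 * buffer through the cache above and later drop its reference with
 * ibsp_dereg_mem(), which is defined further below. The helper name and
 * the variables p_socket, h_pd, p_buf, buf_len and error are hypothetical;
 * they are not part of this file.
 */
#if 0
static int
example_pin_and_unpin(
	IN struct ibsp_socket_info *p_socket,
	IN ib_pd_handle_t h_pd,
	IN void *p_buf,
	IN size_t buf_len )
{
	struct memory_node *p_node;
	int error;

	/* Register the buffer (or reuse a cached registration) for RDMA. */
	p_node = ibsp_reg_mem( p_socket, h_pd, p_buf, buf_len,
		IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE, &error );
	if( !p_node )
		return error;	/* WSAENOBUFS or WSAEFAULT */

	/* ... issue work requests using p_node->p_reg->lkey and
	 * p_node->p_reg->rkey ... */

	/* Drop this socket's reference to the registration. */
	if( ibsp_dereg_mem( p_socket, p_node, &error ) == SOCKET_ERROR )
		return error;

	return 0;
}
#endif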

static inline int __ibsp_dereg_mem_mr(
	IN struct memory_node *node )
{
	IBSP_ENTER( IBSP_DBG_MEM );

	cl_qlist_remove_item( &node->p_reg->node_list, &node->mr_item );
	cl_qlist_remove_item( &node->s->mr_list, &node->socket_item );

	HeapFree( g_ibsp.heap, 0, node );

	IBSP_EXIT( IBSP_DBG_MEM );
	return 0;
}

/* Deregisters a memory region */
int
ibsp_dereg_mem(
	IN struct ibsp_socket_info *s,
	IN struct memory_node *node,
	OUT LPINT lpErrno )
{
	IBSP_ENTER( IBSP_DBG_MEM );

	cl_spinlock_acquire( &s->port->hca->rdma_mem_list.mutex );
	*lpErrno = __ibsp_dereg_mem_mr( node );
	cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );

	IBSP_EXIT( IBSP_DBG_MEM );
	return (*lpErrno? SOCKET_ERROR : 0);
}

/*
 * Deregister the remaining memory regions on an HCA. This function should
 * only be called before destroying the PD. In the normal case, the list
 * should be empty because the switch should have done it.
 */
void
ibsp_dereg_hca(
	IN struct mr_list *mem_list )
{
	cl_list_item_t *item;

	IBSP_ENTER( IBSP_DBG_MEM );

	cl_spinlock_acquire( &mem_list->mutex );
	IBSP_TRACE1( IBSP_DBG_MEM,
		("%d registrations.\n", cl_qlist_count( &mem_list->list )) );

	for( item = cl_qlist_remove_head( &mem_list->list );
		item != cl_qlist_end( &mem_list->list );
		item = cl_qlist_remove_head( &mem_list->list ) )
	{
		struct memory_reg *p_reg = PARENT_STRUCT(item, struct memory_reg, item);
		ib_api_status_t status;

		/* Free any socket nodes that still reference this registration. */
		while( cl_qlist_count( &p_reg->node_list ) )
		{
			struct memory_node *p_node =
				PARENT_STRUCT( cl_qlist_head( &p_reg->node_list ),
				struct memory_node, mr_item );

			__ibsp_dereg_mem_mr( p_node );
		}

		IBSP_TRACE2( IBSP_DBG_MEM, ("unpinning memory reg %p\n", p_reg) );
		status = ib_dereg_mr( p_reg->mr_handle );
		if( status != IB_SUCCESS )
		{
			IBSP_ERROR(
				("ib_dereg_mr returned %s\n", ib_get_err_str( status )) );
		}

		STAT_DEC( mr_num );

		HeapFree( g_ibsp.heap, 0, p_reg );
	}

	cl_spinlock_release( &mem_list->mutex );

	IBSP_EXIT( IBSP_DBG_MEM );
}
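
/*
 * Illustrative sketch (not built): per the comment above, ibsp_dereg_hca()
 * is meant to run just before the HCA's protection domain is destroyed.
 * The helper below is hypothetical and the p_hca->pd field is an
 * assumption; only ibsp_dereg_hca() and rdma_mem_list come from this file.
 */
#if 0
static void
example_destroy_hca_pd(
	IN struct ibsp_hca *p_hca )
{
	/* Release any registrations the switch did not clean up itself. */
	ibsp_dereg_hca( &p_hca->rdma_mem_list );

	/* Only now is it safe to deallocate the PD the regions were pinned to. */
	ib_dealloc_pd( p_hca->pd, NULL );	/* p_hca->pd: assumed field */
}
#endif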

/* Deregister the remaining memory regions. This function should only
 * be called when destroying the socket. In the normal case, the list should
 * be empty because the switch should have done it. */
void
ibsp_dereg_socket(
	IN struct ibsp_socket_info *s )
{
	IBSP_ENTER( IBSP_DBG_MEM );

	if( !s->port )
	{
		CL_ASSERT( !cl_qlist_count( &s->mr_list ) );
		IBSP_EXIT( IBSP_DBG_MEM );
		return;
	}

	cl_spinlock_acquire( &s->port->hca->rdma_mem_list.mutex );
	IBSP_TRACE1( IBSP_DBG_MEM,
		("%d registrations.\n", cl_qlist_count( &s->mr_list )) );

	while( cl_qlist_count( &s->mr_list ) )
	{
		__ibsp_dereg_mem_mr( PARENT_STRUCT( cl_qlist_head( &s->mr_list ),
			struct memory_node, socket_item) );
	}

	cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );

	IBSP_EXIT( IBSP_DBG_MEM );
}

/*
 * Loop through all the memory registrations on an HCA and release
 * all that fall within the specified range.
 */
void
ibsp_hca_flush_mr_cache(
	IN struct ibsp_hca *p_hca,
	IN LPVOID lpvAddress,
	IN SIZE_T Size )
{
	struct memory_reg *p_reg;
	cl_list_item_t *p_item;
	ib_api_status_t status;

	IBSP_ENTER( IBSP_DBG_MEM );

	cl_spinlock_acquire( &p_hca->rdma_mem_list.mutex );
	for( p_item = cl_qlist_head( &p_hca->rdma_mem_list.list );
		p_item != cl_qlist_end( &p_hca->rdma_mem_list.list );
		p_item = cl_qlist_next( p_item ) )
	{
		p_reg = PARENT_STRUCT( p_item, struct memory_reg, item );

		/* Skip registrations that do not lie entirely within the range. */
		if( lpvAddress > p_reg->type.vaddr ||
			((uintn_t)lpvAddress) + Size <
			((uintn_t)(uint64_t)p_reg->type.vaddr) + p_reg->type.length )
		{
			continue;
		}

		/* Release all socket's nodes that reference this registration. */
		while( cl_qlist_count( &p_reg->node_list ) )
		{
			struct memory_node *p_node =
				PARENT_STRUCT( cl_qlist_head( &p_reg->node_list ),
				struct memory_node, mr_item );

			__ibsp_dereg_mem_mr( p_node );
		}

		/* Move to the previous item so the for loop properly moves forward. */
		p_item = cl_qlist_prev( p_item );

		cl_qlist_remove_item( &p_hca->rdma_mem_list.list, &p_reg->item );

		status = ib_dereg_mr( p_reg->mr_handle );
		if( status != IB_SUCCESS )
		{
			IBSP_ERROR(
				("ib_dereg_mr returned %s\n", ib_get_err_str(status)) );
		}

		HeapFree( g_ibsp.heap, 0, p_reg );
	}
	cl_spinlock_release( &p_hca->rdma_mem_list.mutex );

	IBSP_EXIT( IBSP_DBG_MEM );
}
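
/*
 * Illustrative sketch (not built): ibsp_hca_flush_mr_cache() is intended to
 * be called when a virtual address range is released back to the OS, so that
 * cached registrations lying entirely inside that range are torn down before
 * the memory is reused. The hook below and the way the HCA is obtained are
 * assumptions; only ibsp_hca_flush_mr_cache() itself comes from this file.
 */
#if 0
static void
example_on_memory_released(
	IN struct ibsp_hca *p_hca,	/* hypothetical: however the provider tracks the HCA */
	IN LPVOID lpvAddress,
	IN SIZE_T dwSize )
{
	/* Drop the cached registrations covered by the released range. */
	ibsp_hca_flush_mr_cache( p_hca, lpvAddress, dwSize );
}
#endif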