/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <complib/comp_lib.h>
#include <iba/ib_al.h>
#include <iba/ib_al_ioctl.h>

#include "al_debug.h"
#if defined(EVENT_TRACING)
#include "al_proxy_subnet.tmh"
#endif

#include "al_mad_pool.h"
#include "ib_common.h"
#include "al_proxy.h"

extern ib_pool_handle_t     gh_mad_pool;

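/*
 * Service registration ioctls (UAL_REG_SVC/UAL_DEREG_SVC) are accepted by
 * the dispatcher at the bottom of this file but are not implemented by the
 * kernel proxy: the handlers ignore their parameters and fail the request.
 */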
static cl_status_t
proxy_reg_svc(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    UNUSED_PARAM( p_open_context );
    UNUSED_PARAM( h_ioctl );
    UNUSED_PARAM( p_ret_bytes );
    return CL_ERROR;
}

static cl_status_t
proxy_dereg_svc(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    UNUSED_PARAM( p_open_context );
    UNUSED_PARAM( h_ioctl );
    UNUSED_PARAM( p_ret_bytes );
    return CL_ERROR;
}

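/*
 * Completion callback for an SA request issued through UAL_SEND_SA_REQ.
 * Completes the IRP that was pended when the request was sent and, if a
 * response MAD arrived, tracks it in the handle map so the user can fetch
 * it with a subsequent UAL_MAD_RECV_COMP ioctl.
 */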
static void
__proxy_sa_req_cb(
    IN      al_sa_req_t             *p_sa_req,
    IN      ib_mad_element_t        *p_mad_response )
{
    IRP                     *p_irp;
    IO_STACK_LOCATION       *p_io_stack;
    ual_send_sa_req_ioctl_t *p_ioctl;
    al_dev_open_context_t   *p_context;
    uint64_t                hdl;

    AL_ENTER( AL_DBG_QUERY );

    p_irp = (IRP*)p_sa_req->user_context;
    CL_ASSERT( p_irp );

    p_io_stack = IoGetCurrentIrpStackLocation( p_irp );
    p_ioctl = cl_ioctl_out_buf( p_irp );

    p_context = p_io_stack->FileObject->FsContext;
    ASSERT( p_context );
#pragma warning(push, 3)
    IoSetCancelRoutine( p_irp, NULL );
#pragma warning(pop)
    /* Clear the pointer to the query to prevent cancelation. */
    hdl = (size_t)InterlockedExchangePointer(
        &p_irp->Tail.Overlay.DriverContext[0], AL_INVALID_HANDLE );

    cl_spinlock_acquire( &p_context->h_al->obj.lock );
    if( hdl != AL_INVALID_HANDLE )
    {
        CL_ASSERT( p_sa_req ==
            al_hdl_chk( p_context->h_al, hdl, AL_OBJ_TYPE_H_SA_REQ ) );
        al_hdl_free( p_context->h_al, hdl );

        p_ioctl->out.status = p_sa_req->status;
        if( p_mad_response )
        {
            /* Insert an item to track the MAD until the user fetches it. */
            hdl = al_hdl_insert( p_context->h_al,
                p_mad_response, AL_OBJ_TYPE_H_MAD );
            if( hdl != AL_INVALID_HANDLE )
            {
                p_ioctl->out.h_resp = hdl;
                p_ioctl->out.resp_size = p_mad_response->size;
            }
            else
            {
                p_ioctl->out.h_resp = AL_INVALID_HANDLE;
                p_ioctl->out.resp_size = 0;
                p_ioctl->out.status = IB_TIMEOUT;
                ib_put_mad( p_sa_req->p_mad_response );
            }
        }
        else
        {
            p_ioctl->out.h_resp = AL_INVALID_HANDLE;
            p_ioctl->out.resp_size = 0;
        }
    }
    cl_spinlock_release( &p_context->h_al->obj.lock );

    p_irp->IoStatus.Status = STATUS_SUCCESS;
    p_irp->IoStatus.Information = sizeof(p_ioctl->out);
    IoCompleteRequest( p_irp, IO_NO_INCREMENT );

    /* Release the reference taken when the query was initiated. */
    proxy_context_deref( p_context );

    cl_free( p_sa_req );

    AL_EXIT( AL_DBG_QUERY );
}

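/*
 * IRP cancel routine for an outstanding SA request.  Runs with the system
 * cancel spin lock held; it must release that lock via
 * IoReleaseCancelSpinLock before returning.
 */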
static void
__proxy_cancel_sa_req(
    IN      DEVICE_OBJECT*          p_dev_obj,
    IN      IRP*                    p_irp )
{
    al_dev_open_context_t   *p_context;
    PIO_STACK_LOCATION      p_io_stack;
    uint64_t                hdl;
    al_sa_req_t             *p_sa_req;

    AL_ENTER( AL_DBG_DEV );

    UNUSED_PARAM( p_dev_obj );

    /* Get the stack location. */
    p_io_stack = IoGetCurrentIrpStackLocation( p_irp );

    p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext;
    ASSERT( p_context );

    hdl = (size_t)InterlockedExchangePointer(
        &p_irp->Tail.Overlay.DriverContext[0], NULL );
    if( hdl != AL_INVALID_HANDLE )
    {
#pragma warning(push, 3)
        IoSetCancelRoutine( p_irp, NULL );
#pragma warning(pop)
        cl_spinlock_acquire( &p_context->h_al->obj.lock );
        p_sa_req = al_hdl_chk( p_context->h_al, hdl, AL_OBJ_TYPE_H_SA_REQ );
        CL_ASSERT( p_sa_req );
        al_cancel_sa_req( p_sa_req );
        al_hdl_free( p_context->h_al, hdl );
        cl_spinlock_release( &p_context->h_al->obj.lock );
    }

    IoReleaseCancelSpinLock( p_irp->CancelIrql );

    AL_EXIT( AL_DBG_DEV );
}

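/*
 * Handle a UAL_SEND_SA_REQ ioctl: validate the buffers, allocate and track
 * an al_sa_req_t, then send the SA request.  On success the IRP is marked
 * pending and CL_PENDING is returned; __proxy_sa_req_cb completes the IRP
 * when the query finishes.
 *
 * Illustrative sketch (not part of this module) of how a user-mode caller
 * might drive this ioctl.  It assumes h_al_dev is an already-opened handle
 * to the AL device; the control code and the ual_send_sa_req_ioctl_t layout
 * come from iba/ib_al_ioctl.h, and the OVERLAPPED is needed because the
 * IRP completes asynchronously:
 *
 *     ual_send_sa_req_ioctl_t ioctl_buf;
 *     OVERLAPPED              ov = { 0 };
 *     // ... fill ioctl_buf.in (port_guid, timeout_ms, retry_cnt, sa_req) ...
 *     DeviceIoControl( h_al_dev, UAL_SEND_SA_REQ,
 *         &ioctl_buf.in, sizeof(ioctl_buf.in),
 *         &ioctl_buf.out, sizeof(ioctl_buf.out), NULL, &ov );
 */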
static cl_status_t
proxy_send_sa_req(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_send_sa_req_ioctl_t *p_ioctl;
    cl_status_t             status;
    ib_api_status_t         ib_status, *p_usr_status;
    IO_STACK_LOCATION       *p_io_stack;
    al_dev_open_context_t   *p_context;
    al_sa_req_t             *p_sa_req;
    uint64_t                hdl, *p_usr_hdl;

    AL_ENTER( AL_DBG_QUERY );

    UNUSED_PARAM( p_ret_bytes );

    p_context = p_open_context;

    p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );

    /*
     * We support SA requests coming in either through the main file object
     * or the async file handle.
     */
    if( p_io_stack->FileObject->FsContext2 &&
        (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_SA_REQ_SVC )
    {
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("Invalid file object type for request: %016I64x\n",
            (LONG_PTR)p_io_stack->FileObject->FsContext2) );
        return CL_INVALID_PARAMETER;
    }

    /* Check the size of the ioctl */
    if( cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid IOCTL buffers.\n") );
        return CL_INVALID_PARAMETER;
    }

    p_ioctl = cl_ioctl_in_buf( h_ioctl );
    CL_ASSERT( p_ioctl );

    /* Must save user's pointers in case req completes before call returns. */
    p_usr_status = p_ioctl->in.p_status;
    p_usr_hdl = p_ioctl->in.ph_sa_req;

    if( p_ioctl->in.sa_req.attr_size > IB_SA_DATA_SIZE )
    {
        ib_status = IB_INVALID_SETTING;
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid SA data size: %d\n",
            p_ioctl->in.sa_req.attr_size) );
        goto proxy_send_sa_req_err1;
    }

    p_sa_req = (al_sa_req_t*)cl_zalloc( sizeof(al_sa_req_t) );
    if( !p_sa_req )
    {
        ib_status = IB_INSUFFICIENT_MEMORY;
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate SA req.\n") );
        goto proxy_send_sa_req_err1;
    }

    /* Synchronize with callbacks. */
    cl_spinlock_acquire( &p_context->h_al->obj.lock );

    /* Track the request. */
    hdl = al_hdl_insert( p_context->h_al, p_sa_req, AL_OBJ_TYPE_H_SA_REQ );
    if( hdl == AL_INVALID_HANDLE )
    {
        ib_status = IB_INSUFFICIENT_MEMORY;
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to create handle.\n") );
        goto proxy_send_sa_req_err2;
    }

    /*
     * Store the handle in the IRP's driver context so we can cancel it.
     * Note that the handle is really a size_t variable, but is cast to a
     * uint64_t to provide constant size in mixed 32- and 64-bit environments.
     */
    h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)hdl;

    /* Format the SA request */
    p_sa_req->user_context = h_ioctl;
    p_sa_req->pfn_sa_req_cb = __proxy_sa_req_cb;

    p_ioctl->in.sa_req.p_attr = p_ioctl->in.attr;

    /*
     * We never pass the user-mode flag when sending SA requests - the
     * I/O manager will perform all synchronization to make this IRP sync
     * if needed.
     */
    ib_status = al_send_sa_req( p_sa_req, p_ioctl->in.port_guid,
        p_ioctl->in.timeout_ms, p_ioctl->in.retry_cnt,
        &p_ioctl->in.sa_req, 0 );
    if( ib_status == IB_SUCCESS )
    {
        /* Hold a reference on the proxy context until the request completes. */
        proxy_context_ref( p_context );
#pragma warning(push, 3)
        IoSetCancelRoutine( h_ioctl, __proxy_cancel_sa_req );
#pragma warning(pop)
        IoMarkIrpPending( h_ioctl );

        cl_spinlock_release( &p_context->h_al->obj.lock );

        cl_copy_to_user( p_usr_hdl, &hdl, sizeof(hdl) );
        status = CL_PENDING;
    }
    else
    {
        al_hdl_free( p_context->h_al, hdl );

proxy_send_sa_req_err2:
        cl_spinlock_release( &p_context->h_al->obj.lock );
        cl_free( p_sa_req );

proxy_send_sa_req_err1:
        status = CL_INVALID_PARAMETER;

        cl_copy_to_user( p_usr_status, &ib_status, sizeof(ib_api_status_t) );
    }

    AL_EXIT( AL_DBG_QUERY );
    return status;
}

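/*
 * Handle a UAL_CANCEL_SA_REQ ioctl: look up the caller's SA request handle
 * under the AL object lock and cancel the request if it is still valid.
 */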
static cl_status_t
proxy_cancel_sa_req(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_cancel_sa_req_ioctl_t   *p_ioctl;
    al_dev_open_context_t       *p_context;
    al_sa_req_t                 *p_sa_req;

    AL_ENTER( AL_DBG_QUERY );

    UNUSED_PARAM( p_ret_bytes );

    p_context = p_open_context;

    /* Check the size of the ioctl */
    if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cancel_sa_req_ioctl_t) ||
        cl_ioctl_out_size( h_ioctl ) )
    {
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid input buffer.\n") );
        return CL_INVALID_PARAMETER;
    }

    p_ioctl = cl_ioctl_in_buf( h_ioctl );
    CL_ASSERT( p_ioctl );

    cl_spinlock_acquire( &p_context->h_al->obj.lock );
    p_sa_req =
        al_hdl_chk( p_context->h_al, p_ioctl->h_sa_req, AL_OBJ_TYPE_H_SA_REQ );
    if( p_sa_req )
        al_cancel_sa_req( p_sa_req );
    cl_spinlock_release( &p_context->h_al->obj.lock );

    AL_EXIT( AL_DBG_QUERY );
    return CL_SUCCESS;
}

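/*
 * Handle a UAL_MAD_SEND ioctl: copy the user-mode MAD element and payload
 * into an element taken from the kernel MAD pool, fix up the kernel
 * pointers, and post it to the MAD service.  The kernel element pointer is
 * returned to user-mode so the send can later be cancelled with
 * UAL_CANCEL_MAD.
 */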
static cl_status_t
proxy_send_mad(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_send_mad_ioctl_t    *p_ioctl =
        (ual_send_mad_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
    al_dev_open_context_t   *p_context =
        (al_dev_open_context_t *)p_open_context;
    ib_mad_svc_handle_t     h_mad_svc;
    ib_pool_key_t           pool_key = NULL;
    ib_av_handle_t          h_av = NULL;
    ib_mad_element_t        *p_mad_el;
    al_mad_element_t        *p_al_el;
    ib_mad_t                *p_mad_buf, *p_usr_buf;
    ib_grh_t                *p_grh, *p_usr_grh;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_MAD );
    /* Validate input buffers. */
    if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
        cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_EXIT( AL_DBG_MAD );
        return CL_INVALID_PARAMETER;
    }

    /* Validate mad svc handle. */
    h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref(
        p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC );
    if( !h_mad_svc )
    {
        status = IB_INVALID_HANDLE;
        goto proxy_send_mad_err1;
    }

    /* Validate the pool key */
    pool_key = (ib_pool_key_t)al_hdl_ref(
        p_context->h_al, p_ioctl->in.pool_key, AL_OBJ_TYPE_H_POOL_KEY );
    if( !pool_key )
    {
        status = IB_INVALID_HANDLE;
        goto proxy_send_mad_err1;
    }

    /* Validate the AV handle in the mad element if it is not NULL. */
    if( p_ioctl->in.h_av )
    {
        h_av = (ib_av_handle_t)
            al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV );
        if( !h_av )
        {
            status = IB_INVALID_AV_HANDLE;
            goto proxy_send_mad_err1;
        }
    }

    /*
     * Get a mad element from the kernel MAD pool.
     * This should not fail since the pool is set to grow.
     */
    status = ib_get_mad( pool_key, p_ioctl->in.size, &p_mad_el );
    if( status != IB_SUCCESS )
        goto proxy_send_mad_err1;

    /* Store the MAD and GRH buffer pointers. */
    p_mad_buf = p_mad_el->p_mad_buf;
    p_grh = p_mad_el->p_grh;

    /* Now copy the mad element with all info */
    status = ib_convert_cl_status( cl_copy_from_user( p_mad_el,
        p_ioctl->in.p_mad_element, sizeof(ib_mad_element_t) ) );
    if( status != IB_SUCCESS )
        goto proxy_send_mad_err2;

    /* Store the UM pointers. */
    p_usr_buf = p_mad_el->p_mad_buf;
    p_usr_grh = p_mad_el->p_grh;
    /* Restore the MAD and GRH buffer pointers. */
    p_mad_el->p_mad_buf = p_mad_buf;
    p_mad_el->p_grh = p_grh;
    /* Clear the next pointer. */
    p_mad_el->p_next = NULL;
    /*
     * Override the send context so that a response's MAD has a way
     * of getting back to the associated send.  This is needed because a
     * MAD receive completion could fail to be delivered to the app even though
     * the response was properly received in the kernel.
     */
    p_mad_el->context1 = p_ioctl->in.p_mad_element;

    /* Set the kernel AV handle. This is either NULL or a valid KM handle. */
    p_mad_el->h_av = h_av;

    /* Copy the GRH, if valid. */
    if( p_mad_el->grh_valid )
    {
        status = ib_convert_cl_status(
            cl_copy_from_user( p_grh, p_usr_grh, sizeof(ib_grh_t) ) );
        if( status != IB_SUCCESS )
            goto proxy_send_mad_err2;
    }

    /* Copy the mad payload. */
    status = ib_convert_cl_status(
        cl_copy_from_user( p_mad_buf, p_usr_buf, p_ioctl->in.size ) );
    if( status != IB_SUCCESS )
        goto proxy_send_mad_err2;

    /* Copy the kernel element pointer to UM to allow cancelling. */
    status = ib_convert_cl_status( cl_copy_to_user(
        p_ioctl->in.ph_proxy, &p_mad_el, sizeof(ib_mad_element_t*) ) );
    if( status != IB_SUCCESS )
        goto proxy_send_mad_err2;

    /*
     * Copy the UM element pointer to the kernel's AL element
     * for use in completion generation.
     */
    p_al_el = PARENT_STRUCT( p_mad_el, al_mad_element_t, element );
    p_al_el->h_proxy_element = p_ioctl->in.p_mad_element;

    /* Post the element. */
    status = ib_send_mad( h_mad_svc, p_mad_el, NULL );

    if( status != IB_SUCCESS )
    {
proxy_send_mad_err2:
        ib_put_mad( p_mad_el );
    }
proxy_send_mad_err1:

    if( h_av )
        deref_al_obj( &h_av->obj );
    if( pool_key )
        deref_al_obj( &pool_key->obj );
    if( h_mad_svc )
        deref_al_obj( &h_mad_svc->obj );

    p_ioctl->out.status = status;
    *p_ret_bytes = sizeof(p_ioctl->out);

    AL_EXIT( AL_DBG_MAD );
    return CL_SUCCESS;
}

/*
 * Process the ioctl to retrieve a received MAD.
 */
static cl_status_t
proxy_mad_comp(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_mad_recv_ioctl_t    *p_ioctl;
    al_dev_open_context_t   *p_context;
    ib_mad_element_t        *p_mad;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_MAD );

    /* Validate input buffers. */
    if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
        cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_EXIT( AL_DBG_MAD );
        return CL_INVALID_PARAMETER;
    }

    p_ioctl = (ual_mad_recv_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
    p_context = (al_dev_open_context_t*)p_open_context;

    /* Validate the MAD handle and remove it from the handle manager. */
    p_mad = al_hdl_get_mad( p_context->h_al, p_ioctl->in.h_mad );
    if( !p_mad )
    {
        status = IB_INVALID_HANDLE;
        goto proxy_mad_comp_err1;
    }

    /*
     * Return the MAD to the user.  The user-mode library is responsible
     * for correcting all pointers.
     */
    status = ib_convert_cl_status( cl_copy_to_user(
        p_ioctl->in.p_user_mad, p_mad, sizeof(ib_mad_element_t) ) );
    if( status != IB_SUCCESS )
    {
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("Unable to copy element to user's MAD\n") );
        goto proxy_mad_comp_err2;
    }

    /* Copy the MAD buffer. */
    status = ib_convert_cl_status( cl_copy_to_user(
        p_ioctl->in.p_mad_buf, p_mad->p_mad_buf, p_mad->size ) );
    if( status != IB_SUCCESS )
    {
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("Unable to copy buffer to user's MAD\n") );
        goto proxy_mad_comp_err2;
    }

    /* Copy the GRH if it is valid. */
    if( p_mad->grh_valid )
    {
        status = ib_convert_cl_status( cl_copy_to_user(
            p_ioctl->in.p_grh, p_mad->p_grh, sizeof(ib_grh_t) ) );
        if( status != IB_SUCCESS )
        {
            AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("Unable to copy GRH to user's MAD\n") );
            goto proxy_mad_comp_err2;
        }
    }

    if( status == IB_SUCCESS )
    {
        ib_put_mad( p_mad );
    }
    else
    {
proxy_mad_comp_err2:
        ib_put_mad( p_mad );
proxy_mad_comp_err1:
        cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) );
    }

    p_ioctl->out.status = status;
    *p_ret_bytes = sizeof(p_ioctl->out);

    AL_EXIT( AL_DBG_MAD );
    return CL_SUCCESS;
}

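/*
 * UAL_INIT_DGRM_SVC is accepted by the dispatcher below but not implemented
 * by this proxy; the handler ignores its parameters and fails the request.
 */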
static cl_status_t
proxy_init_dgrm(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    UNUSED_PARAM( p_open_context );
    UNUSED_PARAM( h_ioctl );
    UNUSED_PARAM( p_ret_bytes );
    return CL_ERROR;
}

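/*
 * Send completion callback for MAD services registered through the proxy.
 * Queues a MAD_SEND_REC notification (carrying the user-mode MAD pointer
 * saved at send time) for the user to retrieve, then returns the kernel
 * MAD element to its pool.
 */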
static void
__proxy_mad_send_cb(
    IN      ib_mad_svc_handle_t     h_mad_svc,
    IN      void                    *mad_svc_context,
    IN      ib_mad_element_t        *p_mad_element )
{
    misc_cb_ioctl_info_t    cb_info;
    al_dev_open_context_t   *p_context;
    al_mad_element_t        *p_al_el;

    AL_ENTER( AL_DBG_MAD );

    CL_ASSERT( p_mad_element );
    CL_ASSERT( !p_mad_element->p_next );
    p_context = h_mad_svc->obj.h_al->p_context;
    p_al_el = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );

    /*
     * If we're already closing the device - do not queue a callback, since
     * we're cleaning up the callback lists.
     */
    if( proxy_context_ref( p_context ) )
    {
        /* Set up context and callback record type appropriate for UAL */
        cb_info.rec_type = MAD_SEND_REC;
        cb_info.ioctl_rec.mad_send_cb_ioctl_rec.wc_status =
            p_mad_element->status;
        cb_info.ioctl_rec.mad_send_cb_ioctl_rec.p_um_mad =
            p_al_el->h_proxy_element;
        cb_info.ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context =
            mad_svc_context;

        /* Queue this mad completion notification for the user. */
        proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info,
            &h_mad_svc->obj );
    }

    /* Return the MAD. */
    ib_put_mad( p_mad_element );

    proxy_context_deref( p_context );
    AL_EXIT( AL_DBG_MAD );
}

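/*
 * Receive completion callback for MAD services registered through the
 * proxy.  Tracks the received MAD in the handle map and queues a
 * MAD_RECV_REC notification so the user can fetch the element with
 * UAL_MAD_RECV_COMP.
 */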
static void
__proxy_mad_recv_cb(
    IN      ib_mad_svc_handle_t     h_mad_svc,
    IN      void                    *mad_svc_context,
    IN      ib_mad_element_t        *p_mad_element )
{
    misc_cb_ioctl_info_t    cb_info;
    al_dev_open_context_t   *p_context;
    al_mad_element_t        *p_al_mad;
    uint64_t                hdl;

    AL_ENTER( AL_DBG_MAD );

    p_context = h_mad_svc->obj.h_al->p_context;

    p_al_mad = PARENT_STRUCT( p_mad_element, al_mad_element_t, element );

    /* Set up context and callback record type appropriate for UAL */
    cb_info.rec_type = MAD_RECV_REC;
    cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context = mad_svc_context;
    cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.elem_size = p_mad_element->size;
    cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad =
        (ib_mad_element_t* __ptr64)p_mad_element->send_context1;

    /*
     * If we're already closing the device - do not queue a callback, since
     * we're cleaning up the callback lists.
     */
    if( !proxy_context_ref( p_context ) )
    {
        proxy_put_mad( p_mad_element );
        AL_EXIT( AL_DBG_MAD );
        proxy_context_deref( p_context );
        return;
    }

    /* Insert an item to track the MAD until the user fetches it. */
    cl_spinlock_acquire( &p_context->h_al->obj.lock );
    hdl = al_hdl_insert( p_context->h_al, p_mad_element, AL_OBJ_TYPE_H_MAD );
    if( hdl == AL_INVALID_HANDLE )
        goto proxy_mad_recv_cb_err;

    cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.h_mad = hdl;

    /* Queue this mad completion notification for the user. */
    if( !proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info,
        &h_mad_svc->obj ) )
    {
        al_hdl_free( p_context->h_al, hdl );
proxy_mad_recv_cb_err:
        proxy_put_mad( p_mad_element );
    }
    cl_spinlock_release( &p_context->h_al->obj.lock );

    proxy_context_deref( p_context );

    AL_EXIT( AL_DBG_MAD );
}

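/*
 * Handle a UAL_REG_MAD_SVC ioctl: validate the QP handle, substitute the
 * proxy's kernel-mode send/receive callbacks for the user's, and register
 * the MAD service, returning its handle to user-mode.
 */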
static cl_status_t
proxy_reg_mad_svc(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_reg_mad_svc_ioctl_t *p_ioctl =
        (ual_reg_mad_svc_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
    al_dev_open_context_t   *p_context =
        (al_dev_open_context_t *)p_open_context;
    ib_qp_handle_t          h_qp;
    ib_mad_svc_handle_t     h_mad_svc;

    AL_ENTER( AL_DBG_MAD );

    /* Validate input buffers. */
    if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
        cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_EXIT( AL_DBG_MAD );
        return CL_INVALID_PARAMETER;
    }

    /* Set the return bytes in all cases */
    *p_ret_bytes = sizeof(p_ioctl->out);

    /* Validate QP handle */
    h_qp = (ib_qp_handle_t)
        al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP );
    if( !h_qp )
    {
        p_ioctl->out.status = IB_INVALID_QP_HANDLE;
        p_ioctl->out.h_mad_svc = AL_INVALID_HANDLE;
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
        return CL_SUCCESS;
    }

    /* Now proxy's mad_svc overrides */
    p_ioctl->in.mad_svc.pfn_mad_send_cb = __proxy_mad_send_cb;
    p_ioctl->in.mad_svc.pfn_mad_recv_cb = __proxy_mad_recv_cb;

    p_ioctl->out.status = reg_mad_svc( h_qp,
        &p_ioctl->in.mad_svc, &h_mad_svc );
    if( p_ioctl->out.status == IB_SUCCESS )
    {
        p_ioctl->out.h_mad_svc = h_mad_svc->obj.hdl;
        h_mad_svc->obj.hdl_valid = TRUE;
        deref_al_obj( &h_mad_svc->obj );
    }
    else
    {
        p_ioctl->out.h_mad_svc = AL_INVALID_HANDLE;
    }

    deref_al_obj( &h_qp->obj );

    AL_EXIT( AL_DBG_MAD );
    return CL_SUCCESS;
}

/*
 * Deregister the MAD service.
 */
static cl_status_t
proxy_dereg_mad_svc(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_dereg_mad_svc_ioctl_t   *p_ioctl;
    al_dev_open_context_t       *p_context;
    ib_mad_svc_handle_t         h_mad_svc;

    AL_ENTER( AL_DBG_MAD );

    if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
        cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("IOCTL buffer is invalid\n") );
        return CL_INVALID_PARAMETER;
    }

    p_ioctl = (ual_dereg_mad_svc_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
    p_context = (al_dev_open_context_t*)p_open_context;

    /* Set the return bytes in all cases */
    *p_ret_bytes = sizeof(p_ioctl->out);

    /* Validate MAD service. */
    h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref(
        p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC );
    if( !h_mad_svc )
    {
        p_ioctl->out.status = IB_INVALID_HANDLE;
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
        return CL_SUCCESS;
    }

    /* Destroy the MAD service. */
    h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, ib_sync_destroy );
    p_ioctl->out.status = IB_SUCCESS;

    AL_EXIT( AL_DBG_MAD );
    return CL_SUCCESS;
}

/*
 * UAL only uses the reg_mad_pool/dereg_mad_pool ioctls.
 * Create/destroy of the MAD pool is implicit in these ioctls.
 */
static cl_status_t
proxy_reg_mad_pool(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_reg_mad_pool_ioctl_t    *p_ioctl =
        (ual_reg_mad_pool_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
    al_dev_open_context_t       *p_context =
        (al_dev_open_context_t *)p_open_context;
    ib_pd_handle_t              h_pd;
    ib_pool_key_t               pool_key;

    AL_ENTER( AL_DBG_MAD );

    /* Validate input buffers. */
    if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
        cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_EXIT( AL_DBG_MAD );
        return CL_INVALID_PARAMETER;
    }

    /* Set the return bytes in all cases */
    *p_ret_bytes = sizeof(p_ioctl->out);

    /* Validate PD handle */
    h_pd = (ib_pd_handle_t)
        al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );
    if( !h_pd )
    {
        p_ioctl->out.status = IB_INVALID_PD_HANDLE;
        p_ioctl->out.pool_key = AL_INVALID_HANDLE;
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );
        return CL_SUCCESS;
    }

    /*
     * If we're in the kernel, we are using the global MAD pool.  Other
     * MAD pools remain entirely in user-mode.
     */

    /* Register the PD with the MAD pool to obtain a pool_key. */
    p_ioctl->out.status = reg_mad_pool( gh_mad_pool, h_pd, &pool_key );
    if( p_ioctl->out.status == IB_SUCCESS )
    {
        /* Track the pool info with the process context. */
        p_ioctl->out.pool_key = pool_key->obj.hdl;
        pool_key->obj.hdl_valid = TRUE;
        deref_al_obj( &pool_key->obj );
    }
    else
    {
        p_ioctl->out.pool_key = AL_INVALID_HANDLE;
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("reg_mad_pool returned %s.\n",
            ib_get_err_str(p_ioctl->out.status)) );
    }

    deref_al_obj( &h_pd->obj );

    AL_EXIT( AL_DBG_MAD );
    return CL_SUCCESS;
}

/*
 * Deregister the pool_key with the MAD pool.  Destroy the MAD pool if we
 * created it.
 */
static cl_status_t
proxy_dereg_mad_pool(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_dereg_mad_pool_ioctl_t  *p_ioctl =
        (ual_dereg_mad_pool_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
    al_dev_open_context_t       *p_context =
        (al_dev_open_context_t *)p_open_context;
    ib_pool_key_t               pool_key;

    AL_ENTER( AL_DBG_MAD );

    /* Validate input buffers. */
    if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
        cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("IOCTL buffer is invalid\n") );
        return CL_INVALID_PARAMETER;
    }

    /* Set the return bytes in all cases */
    *p_ret_bytes = sizeof(p_ioctl->out);

    /* Validate pool key */
    pool_key = (ib_pool_key_t)al_hdl_ref(
        p_context->h_al, p_ioctl->in.pool_key, AL_OBJ_TYPE_H_POOL_KEY );
    if( !pool_key )
    {
        p_ioctl->out.status = IB_INVALID_HANDLE;
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("User-mode provided pool key is invalid\n") );
        return CL_SUCCESS;
    }

    /* We should only have alias pool keys exported to user-mode. */
    p_ioctl->out.status = dereg_mad_pool( pool_key, AL_KEY_ALIAS );
    if( p_ioctl->out.status != IB_SUCCESS )
    {
        deref_al_obj( &pool_key->obj );
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("dereg_mad_pool failed: %s\n",
            ib_get_err_str( p_ioctl->out.status )) );
    }

    AL_EXIT( AL_DBG_MAD );
    return CL_SUCCESS;
}

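/*
 * Handle a UAL_CANCEL_MAD ioctl: validate the MAD service handle and cancel
 * the outstanding send identified by the user-mode proxy element pointer.
 */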
static cl_status_t
proxy_cancel_mad(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_cancel_mad_ioctl_t  *p_ioctl;
    al_dev_open_context_t   *p_context;
    ib_mad_svc_handle_t     h_mad_svc;

    AL_ENTER( AL_DBG_MAD );

    /* Validate input buffers. */
    if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
        cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_EXIT( AL_DBG_MAD );
        return CL_INVALID_PARAMETER;
    }

    p_ioctl = (ual_cancel_mad_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
    p_context = (al_dev_open_context_t*)p_open_context;

    /* Set the return bytes in all cases */
    *p_ret_bytes = sizeof(p_ioctl->out);

    /* Validate MAD service handle. */
    h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref(
        p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC );
    if( !h_mad_svc )
    {
        p_ioctl->out.status = IB_INVALID_HANDLE;
        AL_EXIT( AL_DBG_MAD );
        return CL_SUCCESS;
    }

    p_ioctl->out.status =
        ib_cancel_mad( h_mad_svc, p_ioctl->in.h_proxy_element );
    deref_al_obj( &h_mad_svc->obj );

    /*
     * The clean up of resources allocated for the sent mad will
     * be handled in the send completion callback.
     */
    AL_EXIT( AL_DBG_MAD );
    return CL_SUCCESS;
}

/*
 * Process the ioctl UAL_LOCAL_MAD: perform a local MAD exchange on a
 * given CA port.
 */
static cl_status_t
proxy_local_mad(
    IN      void                    *p_open_context,
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    ual_local_mad_ioctl_t   *p_ioctl =
        (ual_local_mad_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
    al_dev_open_context_t   *p_context =
        (al_dev_open_context_t *)p_open_context;
    ib_ca_handle_t          h_ca;
    ib_api_status_t         status;

    AL_ENTER( AL_DBG_MAD );

    /* Validate input buffers. */
    if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
        cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
        cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
    {
        AL_EXIT( AL_DBG_MAD );
        return CL_INVALID_PARAMETER;
    }

    /* Only Get methods are forwarded to the local CA. */
    if( ((ib_mad_t*)p_ioctl->in.mad_in)->method != IB_MAD_METHOD_GET )
    {
        AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("invalid method %d\n", ((ib_mad_t*)p_ioctl->in.mad_in)->method) );
        status = IB_UNSUPPORTED;
        goto proxy_local_mad_err;
    }

    /* Validate CA handle */
    h_ca = (ib_ca_handle_t)
        al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA );
    if( !h_ca )
    {
        status = IB_INVALID_CA_HANDLE;
        goto proxy_local_mad_err;
    }

    /* Set the return bytes in all cases */
    *p_ret_bytes = sizeof(p_ioctl->out);

    status = ib_local_mad(
        h_ca, p_ioctl->in.port_num, p_ioctl->in.mad_in, p_ioctl->out.mad_out );

    deref_al_obj( &h_ca->obj );

proxy_local_mad_err:
    p_ioctl->out.status = status;

    AL_EXIT( AL_DBG_MAD );
    return CL_SUCCESS;
}

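/*
 * Entry point for subnet management ioctls issued against the AL device.
 * Routes each UAL_* control code to the matching proxy handler above;
 * unrecognized codes fail with CL_INVALID_PARAMETER.
 */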
cl_status_t
subnet_ioctl(
    IN      cl_ioctl_handle_t       h_ioctl,
    OUT     size_t                  *p_ret_bytes )
{
    cl_status_t             cl_status;
    IO_STACK_LOCATION       *p_io_stack;
    al_dev_open_context_t   *p_context;

    AL_ENTER( AL_DBG_DEV );

    CL_ASSERT( h_ioctl && p_ret_bytes );

    p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
    p_context = p_io_stack->FileObject->FsContext;

    if( !p_context )
    {
        AL_EXIT( AL_DBG_DEV );
        return CL_INVALID_PARAMETER;
    }

    switch( cl_ioctl_ctl_code( h_ioctl ) )
    {
    case UAL_REG_SVC:
        cl_status = proxy_reg_svc( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_SEND_SA_REQ:
        cl_status = proxy_send_sa_req( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_CANCEL_SA_REQ:
        cl_status = proxy_cancel_sa_req( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_MAD_SEND:
        cl_status = proxy_send_mad( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_INIT_DGRM_SVC:
        cl_status = proxy_init_dgrm( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_REG_MAD_SVC:
        cl_status = proxy_reg_mad_svc( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_REG_MAD_POOL:
        cl_status = proxy_reg_mad_pool( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_CANCEL_MAD:
        cl_status = proxy_cancel_mad( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_MAD_RECV_COMP:
        cl_status = proxy_mad_comp( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_DEREG_SVC:
        cl_status = proxy_dereg_svc( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_DEREG_MAD_SVC:
        cl_status = proxy_dereg_mad_svc( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_DEREG_MAD_POOL:
        cl_status = proxy_dereg_mad_pool( p_context, h_ioctl, p_ret_bytes );
        break;
    case UAL_LOCAL_MAD:
        cl_status = proxy_local_mad( p_context, h_ioctl, p_ret_bytes );
        break;
    default:
        cl_status = CL_INVALID_PARAMETER;
        break;
    }

    AL_EXIT( AL_DBG_DEV );
    return cl_status;
}
\r