[DAPL2] Sync with OFED DAPL 2.0.21 src release
[mirror/winof/.git] / ulp / dapl2 / dapl / common / dapl_evd_util.c
1 /*\r
2  * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.\r
3  *\r
4  * This Software is licensed under one of the following licenses:\r
5  *\r
6  * 1) under the terms of the "Common Public License 1.0" a copy of which is\r
7  *    in the file LICENSE.txt in the root directory. The license is also\r
8  *    available from the Open Source Initiative, see\r
9  *    http://www.opensource.org/licenses/cpl.php.\r
10  *\r
11  * 2) under the terms of the "The BSD License" a copy of which is in the file\r
12  *    LICENSE2.txt in the root directory. The license is also available from\r
13  *    the Open Source Initiative, see\r
14  *    http://www.opensource.org/licenses/bsd-license.php.\r
15  *\r
16  * 3) under the terms of the "GNU General Public License (GPL) Version 2" a \r
17  *    copy of which is in the file LICENSE3.txt in the root directory. The \r
18  *    license is also available from the Open Source Initiative, see\r
19  *    http://www.opensource.org/licenses/gpl-license.php.\r
20  *\r
21  * Licensee has the right to choose one of the above licenses.\r
22  *\r
23  * Redistributions of source code must retain the above copyright\r
24  * notice and one of the license notices.\r
25  *\r
26  * Redistributions in binary form must reproduce both the above copyright\r
27  * notice, one of the license notices in the documentation\r
28  * and/or other materials provided with the distribution.\r
29  */\r
30 \r
31 /**********************************************************************\r
32  *\r
33  * MODULE: dapl_evd_util.c\r
34  *\r
35  * PURPOSE: Manage EVD Info structure\r
36  *\r
37  * $Id: dapl_evd_util.c 1410 2006-07-19 17:12:02Z ardavis $\r
38  **********************************************************************/\r
39 \r
40 #include "dapl_evd_util.h"\r
41 #include "dapl_ia_util.h"\r
42 #include "dapl_cno_util.h"\r
43 #include "dapl_ring_buffer_util.h"\r
44 #include "dapl_adapter_util.h"\r
45 #include "dapl_cookie.h"\r
46 #include "dapl.h"\r
47 #include "dapl_cr_util.h"\r
48 #include "dapl_sp_util.h"\r
49 #include "dapl_ep_util.h"\r
50 \r
51 STATIC _INLINE_ void dapli_evd_eh_print_cqe(IN ib_work_completion_t * cqe);\r
52 \r
53 DAT_RETURN dapli_evd_event_alloc(IN DAPL_EVD * evd_ptr, IN DAT_COUNT qlen);\r
54 \r
55 char *dapl_event_str(IN DAT_EVENT_NUMBER event_num)\r
56 {\r
57 #if defined(DAPL_DBG)\r
58         struct dat_event_str {\r
59                 char *str;\r
60                 DAT_EVENT_NUMBER num;\r
61         };\r
62         static struct dat_event_str events[] = {\r
63                 {"DAT_DTO_COMPLETION_EVENT", DAT_DTO_COMPLETION_EVENT},\r
64                 {"DAT_RMR_BIND_COMPLETION_EVENT",\r
65                  DAT_RMR_BIND_COMPLETION_EVENT},\r
66                 {"DAT_CONNECTION_REQUEST_EVENT", DAT_CONNECTION_REQUEST_EVENT},\r
67                 {"DAT_CONNECTION_EVENT_ESTABLISHED",\r
68                  DAT_CONNECTION_EVENT_ESTABLISHED},\r
69                 {"DAT_CONNECTION_EVENT_PEER_REJECTED",\r
70                  DAT_CONNECTION_EVENT_PEER_REJECTED},\r
71                 {"DAT_CONNECTION_EVENT_NON_PEER_REJECTED",\r
72                  DAT_CONNECTION_EVENT_NON_PEER_REJECTED},\r
73                 {"DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR",\r
74                  DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR},\r
75                 {"DAT_CONNECTION_EVENT_DISCONNECTED",\r
76                  DAT_CONNECTION_EVENT_DISCONNECTED},\r
77                 {"DAT_CONNECTION_EVENT_BROKEN", DAT_CONNECTION_EVENT_BROKEN},\r
78                 {"DAT_CONNECTION_EVENT_TIMED_OUT",\r
79                  DAT_CONNECTION_EVENT_TIMED_OUT},\r
80                 {"DAT_CONNECTION_EVENT_UNREACHABLE",\r
81                  DAT_CONNECTION_EVENT_UNREACHABLE},\r
82                 {"DAT_ASYNC_ERROR_EVD_OVERFLOW", DAT_ASYNC_ERROR_EVD_OVERFLOW},\r
83                 {"DAT_ASYNC_ERROR_IA_CATASTROPHIC",\r
84                  DAT_ASYNC_ERROR_IA_CATASTROPHIC},\r
85                 {"DAT_ASYNC_ERROR_EP_BROKEN", DAT_ASYNC_ERROR_EP_BROKEN},\r
86                 {"DAT_ASYNC_ERROR_TIMED_OUT", DAT_ASYNC_ERROR_TIMED_OUT},\r
87                 {"DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR",\r
88                  DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR},\r
89                 {"DAT_HA_DOWN_TO_1", DAT_HA_DOWN_TO_1},\r
90                 {"DAT_HA_UP_TO_MULTI_PATH", DAT_HA_UP_TO_MULTI_PATH},\r
91                 {"DAT_SOFTWARE_EVENT", DAT_SOFTWARE_EVENT},\r
92 #ifdef DAT_EXTENSIONS\r
93                 {"DAT_EXTENSION_EVENT", DAT_EXTENSION_EVENT},\r
94                 {"DAT_IB_EXTENSION_RANGE_BASE", DAT_IB_EXTENSION_RANGE_BASE},\r
95                 {"DAT_IB_UD_CONNECTION_REQUEST_EVENT",\r
96                  DAT_IB_EXTENSION_RANGE_BASE + 1},\r
97                 {"DAT_IB_UD_CONNECTION_EVENT_ESTABLISHED",\r
98                  DAT_IB_EXTENSION_RANGE_BASE + 2},\r
99                 {"DAT_IW_EXTENSION_RANGE_BASE", DAT_IW_EXTENSION_RANGE_BASE},\r
100 #endif                          /* DAT_EXTENSIONS */\r
101                 {NULL, 0},\r
102         };\r
103         int i;\r
104 \r
105         for (i = 0; events[i].str; i++) {\r
106                 if (events[i].num == event_num)\r
107                         return events[i].str;\r
108         }\r
109         return "Unknown DAT event?";\r
110 #else\r
111         static char str[16];\r
112         sprintf(str, "%x", event_num);\r
113         return str;\r
114 #endif\r
115 }\r
116 \r
117 /*\r
118  * dapls_evd_internal_create\r
119  *\r
120  * actually create the evd.  this is called after all parameter checking\r
121  * has been performed in dapl_ep_create.  it is also called from dapl_ia_open\r
122  * to create the default async evd.\r
123  *\r
124  * Input:\r
125  *      ia_ptr\r
126  *      cno_ptr\r
127  *      qlen\r
128  *      evd_flags\r
129  *\r
130  * Output:\r
131  *      evd_ptr_ptr\r
132  *\r
133  * Returns:\r
134  *      none\r
135  *\r
136  */\r
137 \r
DAT_RETURN
dapls_evd_internal_create(DAPL_IA * ia_ptr,
			  DAPL_CNO * cno_ptr,
			  DAT_COUNT min_qlen,
			  DAT_EVD_FLAGS evd_flags, DAPL_EVD ** evd_ptr_ptr)
{
	DAPL_EVD *evd_ptr;
	DAT_COUNT cq_len;
	DAT_RETURN dat_status;

	dat_status = DAT_SUCCESS;
	*evd_ptr_ptr = NULL;
	cq_len = min_qlen;

	/* Allocate and initialize the EVD structure; on failure nothing
	 * needs to be torn down (evd_ptr stays NULL for the bail path). */
	evd_ptr = dapls_evd_alloc(ia_ptr, cno_ptr, evd_flags, min_qlen);
	if (!evd_ptr) {
		dat_status =
		    DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
		goto bail;
	}

	/*
	 * If we are dealing with event streams besides a CQ event stream,
	 * be conservative and set producer side locking.  Otherwise, no.
	 */
	evd_ptr->evd_producer_locking_needed =
	    !(evd_flags & (DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG));

	/* Before we setup any callbacks, transition state to OPEN.  */
	evd_ptr->evd_state = DAPL_EVD_STATE_OPEN;

	if (evd_flags & DAT_EVD_ASYNC_FLAG) {
		/*
		 * There is no cq associate with async evd. Set it to invalid
		 */
		evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;

	} else if (0 != (evd_flags & ~(DAT_EVD_SOFTWARE_FLAG
				       | DAT_EVD_CONNECTION_FLAG
				       | DAT_EVD_CR_FLAG))) {
		/* EVD carries a CQ-backed stream (DTO and/or RMR bind). */
#if defined(_VENDOR_IBAL_)
		/* 
		 * The creation of CQ required a PD (PZ) associated with it and
		 * we do not have a PD here; therefore, the work-around is that we
		 * will postpone the creation of the cq till the creation of QP which
		 * this cq will associate with.
		 */
		evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;
#else
		/* cq_len is in/out: the provider may round the depth up. */
		dat_status = dapls_ib_cq_alloc(ia_ptr, evd_ptr, &cq_len);
		if (dat_status != DAT_SUCCESS) {
			goto bail;
		}

		/* Now reset the cq_len in the attributes, it may have changed */
		evd_ptr->qlen = cq_len;

		/* Register the DTO completion handler for this CQ before
		 * arming it, so no notification can be lost. */
		dat_status =
		    dapls_ib_setup_async_callback(ia_ptr,
						  DAPL_ASYNC_CQ_COMPLETION,
						  evd_ptr,
						  (ib_async_handler_t)
						  dapl_evd_dto_callback,
						  evd_ptr);
		if (dat_status != DAT_SUCCESS) {
			goto bail;
		}

		/* Arm the CQ so the first completion triggers a callback. */
		dat_status = dapls_set_cq_notify(ia_ptr, evd_ptr);

		if (dat_status != DAT_SUCCESS) {
			goto bail;
		}
#endif				/* _VENDOR_IBAL_ */
	}

	/* We now have an accurate count of events, so allocate them into
	 * the EVD
	 */
	dat_status = dapli_evd_event_alloc(evd_ptr, cq_len);
	if (dat_status != DAT_SUCCESS) {
		goto bail;
	}

	dapl_ia_link_evd(ia_ptr, evd_ptr);
	*evd_ptr_ptr = evd_ptr;

      bail:
	/* Tear down the partially constructed EVD on any failure. */
	if (dat_status != DAT_SUCCESS) {
		if (evd_ptr) {
			dapls_evd_dealloc(evd_ptr);
		}
	}

	return dat_status;
}
234 \r
235 /*\r
236  * dapls_evd_alloc\r
237  *\r
238  * alloc and initialize an EVD struct\r
239  *\r
240  * Input:\r
241  *      ia\r
242  *\r
243  * Output:\r
244  *      evd_ptr\r
245  *\r
246  * Returns:\r
247  *      none\r
248  *\r
249  */\r
250 DAPL_EVD *dapls_evd_alloc(IN DAPL_IA * ia_ptr,\r
251                           IN DAPL_CNO * cno_ptr,\r
252                           IN DAT_EVD_FLAGS evd_flags, IN DAT_COUNT qlen)\r
253 {\r
254         DAPL_EVD *evd_ptr;\r
255 \r
256         /* Allocate EVD */\r
257         evd_ptr = (DAPL_EVD *) dapl_os_alloc(sizeof(DAPL_EVD));\r
258         if (!evd_ptr) {\r
259                 goto bail;\r
260         }\r
261 \r
262         /* zero the structure */\r
263         dapl_os_memzero(evd_ptr, sizeof(DAPL_EVD));\r
264 \r
265 #ifdef DAPL_COUNTERS\r
266         /* Allocate counters */\r
267         evd_ptr->cntrs =\r
268             dapl_os_alloc(sizeof(DAT_UINT64) * DCNT_EVD_ALL_COUNTERS);\r
269         if (evd_ptr->cntrs == NULL) {\r
270                 dapl_os_free(evd_ptr, sizeof(DAPL_EVD));\r
271                 return (NULL);\r
272         }\r
273         dapl_os_memzero(evd_ptr->cntrs,\r
274                         sizeof(DAT_UINT64) * DCNT_EVD_ALL_COUNTERS);\r
275 #endif                          /* DAPL_COUNTERS */\r
276 \r
277         /*\r
278          * initialize the header\r
279          */\r
280         evd_ptr->header.provider = ia_ptr->header.provider;\r
281         evd_ptr->header.magic = DAPL_MAGIC_EVD;\r
282         evd_ptr->header.handle_type = DAT_HANDLE_TYPE_EVD;\r
283         evd_ptr->header.owner_ia = ia_ptr;\r
284         evd_ptr->header.user_context.as_64 = 0;\r
285         evd_ptr->header.user_context.as_ptr = NULL;\r
286         dapl_llist_init_entry(&evd_ptr->header.ia_list_entry);\r
287         dapl_os_lock_init(&evd_ptr->header.lock);\r
288 \r
289         /*\r
290          * Initialize the body\r
291          */\r
292         evd_ptr->evd_state = DAPL_EVD_STATE_INITIAL;\r
293         evd_ptr->evd_flags = evd_flags;\r
294         evd_ptr->evd_enabled = DAT_TRUE;\r
295         evd_ptr->evd_waitable = DAT_TRUE;\r
296         evd_ptr->evd_producer_locking_needed = 1;       /* Conservative value.  */\r
297         evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;\r
298         dapl_os_atomic_set(&evd_ptr->evd_ref_count, 0);\r
299         evd_ptr->catastrophic_overflow = DAT_FALSE;\r
300         evd_ptr->qlen = qlen;\r
301         evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD;    /* FIXME: should be DAPL_EVD_STATE_INIT */\r
302         dapl_os_wait_object_init(&evd_ptr->wait_object);\r
303 \r
304         evd_ptr->cno_active_count = 0;\r
305         if (cno_ptr != NULL) {\r
306                 /* Take a reference count on the CNO */\r
307                 dapl_os_atomic_inc(&cno_ptr->cno_ref_count);\r
308         }\r
309         evd_ptr->cno_ptr = cno_ptr;\r
310 \r
311       bail:\r
312         return evd_ptr;\r
313 }\r
314 \r
315 /*\r
316  * dapls_evd_event_alloc\r
317  *\r
318  * alloc events into an EVD.\r
319  *\r
320  * Input:\r
321  *      evd_ptr\r
322  *      qlen\r
323  *\r
324  * Output:\r
325  *      NONE\r
326  *\r
327  * Returns:\r
328  *      DAT_SUCCESS\r
329  *      ERROR\r
330  *\r
331  */\r
332 DAT_RETURN dapli_evd_event_alloc(IN DAPL_EVD * evd_ptr, IN DAT_COUNT qlen)\r
333 {\r
334         DAT_EVENT *event_ptr;\r
335         DAT_COUNT i;\r
336         DAT_RETURN dat_status;\r
337 \r
338         dat_status = DAT_SUCCESS;\r
339 \r
340         /* Allocate EVENTs */\r
341         event_ptr =\r
342             (DAT_EVENT *) dapl_os_alloc(evd_ptr->qlen * sizeof(DAT_EVENT));\r
343         if (event_ptr == NULL) {\r
344                 dat_status =\r
345                     DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);\r
346                 goto bail;\r
347         }\r
348         evd_ptr->events = event_ptr;\r
349 \r
350         /* allocate free event queue */\r
351         dat_status = dapls_rbuf_alloc(&evd_ptr->free_event_queue, qlen);\r
352         if (dat_status != DAT_SUCCESS) {\r
353                 goto bail;\r
354         }\r
355 \r
356         /* allocate pending event queue */\r
357         dat_status = dapls_rbuf_alloc(&evd_ptr->pending_event_queue, qlen);\r
358         if (dat_status != DAT_SUCCESS) {\r
359                 goto bail;\r
360         }\r
361 \r
362         /* add events to free event queue */\r
363         for (i = 0; i < evd_ptr->qlen; i++) {\r
364                 dapls_rbuf_add(&evd_ptr->free_event_queue, (void *)event_ptr);\r
365                 event_ptr++;\r
366         }\r
367 \r
368         evd_ptr->cq_notified = DAT_FALSE;\r
369         evd_ptr->cq_notified_when = 0;\r
370         evd_ptr->threshold = 0;\r
371 \r
372       bail:\r
373         return dat_status;\r
374 }\r
375 \r
376 /*\r
377  * dapls_evd_event_realloc\r
378  *\r
379  * realloc events into an EVD.\r
380  *\r
381  * Input:\r
382  *      evd_ptr\r
383  *      qlen\r
384  *\r
385  * Output:\r
386  *      NONE\r
387  *\r
388  * Returns:\r
389  *      DAT_SUCCESS\r
390  *      ERROR\r
391  *\r
392  */\r
/*
 * Resize the EVD's event array and both ring buffers to 'qlen' entries,
 * then rebase queued event pointers against the (possibly moved) array.
 */
DAT_RETURN dapls_evd_event_realloc(IN DAPL_EVD * evd_ptr, IN DAT_COUNT qlen)
{
	DAT_EVENT *events;
	DAT_COUNT old_qlen;
	DAT_COUNT i;
	intptr_t diff;
	DAT_RETURN dat_status;

	/* Allocate EVENTs */
	events = (DAT_EVENT *) dapl_os_realloc(evd_ptr->events,
					       qlen * sizeof(DAT_EVENT));
	if (NULL == events) {
		dat_status =
		    DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
		goto bail;
	}

	/* Element-count delta between the new and old array base.
	 * NOTE(review): arithmetic on the old pointer after realloc is
	 * technically undefined behavior; assumed benign on the targeted
	 * platforms.  dapls_rbuf_adjust presumably rebases the stored
	 * DAT_EVENT pointers by this delta — confirm units (elements vs
	 * bytes) against the ring-buffer implementation. */
	diff = events - evd_ptr->events;
	evd_ptr->events = events;

	old_qlen = evd_ptr->qlen;
	evd_ptr->qlen = qlen;

	/* reallocate free event queue */
	dat_status = dapls_rbuf_realloc(&evd_ptr->free_event_queue, qlen);
	if (dat_status != DAT_SUCCESS) {
		goto bail;
	}
	dapls_rbuf_adjust(&evd_ptr->free_event_queue, diff);

	/* reallocate pending event queue */
	dat_status = dapls_rbuf_realloc(&evd_ptr->pending_event_queue, qlen);
	if (dat_status != DAT_SUCCESS) {
		goto bail;
	}
	dapls_rbuf_adjust(&evd_ptr->pending_event_queue, diff);

	/*
	 * add new events to free event queue. 
	 * (When shrinking, old_qlen >= qlen and this loop does nothing.)
	 */
	for (i = old_qlen; i < qlen; i++) {
		dapls_rbuf_add(&evd_ptr->free_event_queue, (void *)&events[i]);
	}

      bail:
	return dat_status;
}
440 \r
441 /*\r
442  * dapls_evd_dealloc\r
443  *\r
444  * Free the passed in EVD structure. If an error occurs, this function\r
445  * will clean up all of the internal data structures and report the\r
446  * error.\r
447  *\r
448  * Input:\r
449  *      evd_ptr\r
450  *\r
451  * Output:\r
452  *      none\r
453  *\r
454  * Returns:\r
455  *      status\r
456  *\r
457  */\r
458 DAT_RETURN dapls_evd_dealloc(IN DAPL_EVD * evd_ptr)\r
459 {\r
460         DAT_RETURN dat_status;\r
461         DAPL_IA *ia_ptr;\r
462 \r
463         dat_status = DAT_SUCCESS;\r
464 \r
465         dapl_os_assert(evd_ptr->header.magic == DAPL_MAGIC_EVD);\r
466         dapl_os_assert(dapl_os_atomic_read(&evd_ptr->evd_ref_count) == 0);\r
467 \r
468         /*\r
469          * Destroy the CQ first, to keep any more callbacks from coming\r
470          * up from it.\r
471          */\r
472         evd_ptr->evd_enabled = DAT_FALSE;\r
473         if (evd_ptr->ib_cq_handle != IB_INVALID_HANDLE) {\r
474                 ia_ptr = evd_ptr->header.owner_ia;\r
475 \r
476                 dat_status = dapls_ib_cq_free(ia_ptr, evd_ptr);\r
477                 if (dat_status != DAT_SUCCESS) {\r
478                         goto bail;\r
479                 }\r
480         }\r
481 \r
482         /*\r
483          * We should now be safe to invalidate the EVD; reset the\r
484          * magic to prevent reuse.\r
485          */\r
486         evd_ptr->header.magic = DAPL_MAGIC_INVALID;\r
487 \r
488         /* Release reference on the CNO if it exists */\r
489         if (evd_ptr->cno_ptr != NULL) {\r
490                 dapl_os_atomic_dec(&evd_ptr->cno_ptr->cno_ref_count);\r
491                 evd_ptr->cno_ptr = NULL;\r
492         }\r
493 \r
494         /* If the ring buffer allocation failed, then the dapls_rbuf_destroy   */\r
495         /* function will detect that the ring buffer's internal data (ex. base */\r
496         /* pointer) are invalid and will handle the situation appropriately    */\r
497         dapls_rbuf_destroy(&evd_ptr->free_event_queue);\r
498         dapls_rbuf_destroy(&evd_ptr->pending_event_queue);\r
499 \r
500         if (evd_ptr->events) {\r
501                 dapl_os_free(evd_ptr->events,\r
502                              evd_ptr->qlen * sizeof(DAT_EVENT));\r
503         }\r
504 \r
505         dapl_os_wait_object_destroy(&evd_ptr->wait_object);\r
506 \r
507 #ifdef DAPL_COUNTERS\r
508         dapl_os_free(evd_ptr->cntrs,\r
509                      sizeof(DAT_UINT64) * DCNT_EVD_ALL_COUNTERS);\r
510 #endif                          /* DAPL_COUNTERS */\r
511 \r
512         dapl_os_free(evd_ptr, sizeof(DAPL_EVD));\r
513 \r
514       bail:\r
515         return dat_status;\r
516 }\r
517 \r
518 STATIC _INLINE_ char *DAPL_GET_DTO_OP_STR(int op)\r
519 {\r
520         static char *dto_ops[] = {\r
521                 "OP_SEND",\r
522                 "OP_RECEIVE",\r
523                 "OP_RDMA_WRITE",\r
524                 "OP_RDMA_READ"\r
525         };\r
526         return ((op < 0 || op > 3) ? "Invalid DTO OP?" : dto_ops[op]);\r
527 }\r
528 \r
529 #if !defined(DAPL_GET_CQE_OP_STR)\r
530 #define DAPL_GET_CQE_OP_STR(e) "Unknown CEQ OP String?"\r
531 #endif\r
532 #if !defined(DAPL_GET_CQE_VENDOR_ERR)\r
533 #define DAPL_GET_CQE_VENDOR_ERR(e) 0\r
534 #endif\r
535 \r
536 /*\r
537  * dapli_evd_eh_print_cqe\r
538  *\r
539  * Input:\r
540  *      cqe_ptr\r
541  *\r
542  * Output:\r
543  *      none\r
544  *\r
545  * Prints out a CQE for debug purposes\r
546  *\r
547  */\r
548 \r
/* Dump one work completion (CQE) to the debug log; no-op in non-debug
 * builds.  Uses the provider's DAPL_GET_CQE_* accessor macros. */
void dapli_evd_eh_print_cqe(IN ib_work_completion_t * cqe_ptr)
{
#ifdef DAPL_DBG
	dapl_dbg_log(DAPL_DBG_TYPE_CALLBACK,
		     "\t >>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<\n");
	dapl_dbg_log(DAPL_DBG_TYPE_CALLBACK,
		     "\t dapl_evd_dto_callback : CQE \n");
	dapl_dbg_log(DAPL_DBG_TYPE_CALLBACK,
		     "\t\t work_req_id %lli\n", DAPL_GET_CQE_WRID(cqe_ptr));
	/* Op type and byte count are only meaningful on success (status 0). */
	if (DAPL_GET_CQE_STATUS(cqe_ptr) == 0) {
		dapl_dbg_log(DAPL_DBG_TYPE_CALLBACK,
			     "\t\t op_type: %s\n",
			     DAPL_GET_CQE_OP_STR(cqe_ptr));
		dapl_dbg_log(DAPL_DBG_TYPE_CALLBACK,
			     "\t\t bytes_num %d\n",
			     DAPL_GET_CQE_BYTESNUM(cqe_ptr));
	}
	dapl_dbg_log(DAPL_DBG_TYPE_CALLBACK,
		     "\t\t status %d vendor_err 0x%x\n",
		     DAPL_GET_CQE_STATUS(cqe_ptr),
		     DAPL_GET_CQE_VENDOR_ERR(cqe_ptr));
	dapl_dbg_log(DAPL_DBG_TYPE_CALLBACK,
		     "\t >>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<\n");
#endif
	return;
}
575 \r
576 /*\r
577  * Event posting code follows.\r
578  */\r
579 \r
580 /*\r
581  * These next two functions (dapli_evd_get_event and dapli_evd_post_event)\r
582  * are a pair.  They are always called together, from one of the functions\r
583  * at the end of this file (dapl_evd_post_*_event).\r
584  *\r
585  * Note that if producer side locking is enabled, the first one takes the\r
586  * EVD lock and the second releases it.\r
587  */\r
588 \r
589 /* dapli_evd_get_event\r
590  *\r
591  * Get an event struct from the evd.  The caller should fill in the event\r
592  * and call dapl_evd_post_event.\r
593  *\r
594  * If there are no events available, an overflow event is generated to the\r
595  * async EVD handler.\r
596  *\r
597  * If this EVD required producer locking, a successful return implies\r
598  * that the lock is held.\r
599  *\r
600  * Input:\r
601  *      evd_ptr\r
602  *\r
603  * Output:\r
604  *      event\r
605  *\r
606  */\r
607 \r
608 static DAT_EVENT *dapli_evd_get_event(DAPL_EVD * evd_ptr)\r
609 {\r
610         DAT_EVENT *event;\r
611 \r
612         if (evd_ptr->evd_producer_locking_needed) {\r
613                 dapl_os_lock(&evd_ptr->header.lock);\r
614         }\r
615 \r
616         event = (DAT_EVENT *) dapls_rbuf_remove(&evd_ptr->free_event_queue);\r
617 \r
618         /* Release the lock if it was taken and the call failed.  */\r
619         if (!event && evd_ptr->evd_producer_locking_needed) {\r
620                 dapl_os_unlock(&evd_ptr->header.lock);\r
621         }\r
622 \r
623         return event;\r
624 }\r
625 \r
626 /* dapli_evd_post_event\r
627  *\r
628  * Post the <event> to the evd.  If possible, invoke the evd's CNO.\r
629  * Otherwise post the event on the pending queue.\r
630  *\r
631  * If producer side locking is required, the EVD lock must be held upon\r
632  * entry to this function.\r
633  *\r
634  * Input:\r
635  *      evd_ptr\r
636  *      event\r
637  *\r
638  * Output:\r
639  *      none\r
640  *\r
641  */\r
642 \r
static void
dapli_evd_post_event(IN DAPL_EVD * evd_ptr, IN const DAT_EVENT * event_ptr)
{
	DAT_RETURN dat_status;
	DAPL_CNO *cno_to_trigger = NULL;

	dapl_dbg_log(DAPL_DBG_TYPE_EVD, "%s: Called with event %s\n",
		     __FUNCTION__, dapl_event_str(event_ptr->event_number));

	/* The event was obtained via dapli_evd_get_event, so the pending
	 * queue is guaranteed to have room for it. */
	dat_status = dapls_rbuf_add(&evd_ptr->pending_event_queue,
				    (void *)event_ptr);
	dapl_os_assert(dat_status == DAT_SUCCESS);

	dapl_os_assert(evd_ptr->evd_state == DAPL_EVD_STATE_WAITED
		       || evd_ptr->evd_state == DAPL_EVD_STATE_OPEN);

	if (evd_ptr->evd_state == DAPL_EVD_STATE_OPEN) {
		/* No waiter.  Arrange to trigger a CNO if it exists.  */

		if (evd_ptr->evd_enabled) {
			cno_to_trigger = evd_ptr->cno_ptr;
		}
		/* Release the lock taken by dapli_evd_get_event. */
		if (evd_ptr->evd_producer_locking_needed) {
			dapl_os_unlock(&evd_ptr->header.lock);
		}
	} else {
		/*
		 * We're in DAPL_EVD_STATE_WAITED.  Take the lock if
		 * we don't have it, recheck, and signal.
		 * (If producer locking is needed, the lock is already
		 * held from dapli_evd_get_event.)
		 */
		if (!evd_ptr->evd_producer_locking_needed) {
			dapl_os_lock(&evd_ptr->header.lock);
		}

		/* Only wake the waiter once the pending count reaches the
		 * threshold it asked for. */
		if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED
		    && (dapls_rbuf_count(&evd_ptr->pending_event_queue)
			>= evd_ptr->threshold)) {
			dapl_os_unlock(&evd_ptr->header.lock);

			/* DTO/RMR streams use the CQ-based wakeup path;
			 * everything else uses the EVD wait object. */
			if (evd_ptr->evd_flags & (DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG)) {
				dapls_evd_dto_wakeup(evd_ptr);
			} else {
				dapl_os_wait_object_wakeup(&evd_ptr->wait_object);
			}

		} else {
			dapl_os_unlock(&evd_ptr->header.lock);
		}
	}

	/* Trigger the CNO outside the EVD lock. */
	if (cno_to_trigger != NULL) {
		dapl_internal_cno_trigger(cno_to_trigger, evd_ptr);
	}
}
697 \r
698 /* dapli_evd_post_event_nosignal\r
699  *\r
700  * Post the <event> to the evd.  Do not do any wakeup processing.\r
701  * This function should only be called if it is known that there are\r
702  * no waiters that it is appropriate to wakeup on this EVD.  An example\r
703  * of such a situation is during internal dat_evd_wait() processing.\r
704  *\r
705  * If producer side locking is required, the EVD lock must be held upon\r
706  * entry to this function.\r
707  *\r
708  * Input:\r
709  *      evd_ptr\r
710  *      event\r
711  *\r
712  * Output:\r
713  *      none\r
714  *\r
715  */\r
716 \r
717 static void\r
718 dapli_evd_post_event_nosignal(IN DAPL_EVD * evd_ptr,\r
719                               IN const DAT_EVENT * event_ptr)\r
720 {\r
721         DAT_RETURN dat_status;\r
722 \r
723         dapl_dbg_log(DAPL_DBG_TYPE_EVD, "%s: Called with event %s\n",\r
724                      __FUNCTION__, dapl_event_str(event_ptr->event_number));\r
725 \r
726         dat_status = dapls_rbuf_add(&evd_ptr->pending_event_queue,\r
727                                     (void *)event_ptr);\r
728         dapl_os_assert(dat_status == DAT_SUCCESS);\r
729 \r
730         dapl_os_assert(evd_ptr->evd_state == DAPL_EVD_STATE_WAITED\r
731                        || evd_ptr->evd_state == DAPL_EVD_STATE_OPEN);\r
732 \r
733         if (evd_ptr->evd_producer_locking_needed) {\r
734                 dapl_os_unlock(&evd_ptr->header.lock);\r
735         }\r
736 }\r
737 \r
738 /* dapli_evd_format_overflow_event\r
739  *\r
740  * format an overflow event for posting\r
741  *\r
742  * Input:\r
743  *      evd_ptr\r
744  *      event_ptr\r
745  *\r
746  * Output:\r
747  *      none\r
748  *\r
749  */\r
750 static void\r
751 dapli_evd_format_overflow_event(IN DAPL_EVD * evd_ptr,\r
752                                 OUT DAT_EVENT * event_ptr)\r
753 {\r
754         DAPL_IA *ia_ptr;\r
755 \r
756         ia_ptr = evd_ptr->header.owner_ia;\r
757 \r
758         event_ptr->evd_handle = (DAT_EVD_HANDLE) evd_ptr;\r
759         event_ptr->event_number = DAT_ASYNC_ERROR_EVD_OVERFLOW;\r
760         event_ptr->event_data.asynch_error_event_data.dat_handle =\r
761             (DAT_HANDLE) ia_ptr;\r
762 }\r
763 \r
764 /* dapli_evd_post_overflow_event\r
765  *\r
766  * post an overflow event\r
767  *\r
768  * Input:\r
769  *      async_evd_ptr\r
770  *      evd_ptr\r
771  *\r
772  * Output:\r
773  *      none\r
774  *\r
775  */\r
776 static void\r
777 dapli_evd_post_overflow_event(IN DAPL_EVD * async_evd_ptr,\r
778                               IN DAPL_EVD * overflow_evd_ptr)\r
779 {\r
780         DAT_EVENT *overflow_event;\r
781 \r
782         /* The overflow_evd_ptr mght be the same as evd.\r
783          * In that case we've got a catastrophic overflow.\r
784          */\r
785         dapl_log(DAPL_DBG_TYPE_WARN,\r
786                  " WARNING: overflow event on EVD %p/n", overflow_evd_ptr);\r
787 \r
788         if (async_evd_ptr == overflow_evd_ptr) {\r
789                 async_evd_ptr->catastrophic_overflow = DAT_TRUE;\r
790                 async_evd_ptr->evd_state = DAPL_EVD_STATE_DEAD;\r
791                 return;\r
792         }\r
793 \r
794         overflow_event = dapli_evd_get_event(overflow_evd_ptr);\r
795         if (!overflow_event) {\r
796                 /* this is not good */\r
797                 overflow_evd_ptr->catastrophic_overflow = DAT_TRUE;\r
798                 overflow_evd_ptr->evd_state = DAPL_EVD_STATE_DEAD;\r
799                 return;\r
800         }\r
801         dapli_evd_format_overflow_event(overflow_evd_ptr, overflow_event);\r
802         dapli_evd_post_event(overflow_evd_ptr, overflow_event);\r
803 \r
804         return;\r
805 }\r
806 \r
807 static DAT_EVENT *dapli_evd_get_and_init_event(IN DAPL_EVD * evd_ptr,\r
808                                                IN DAT_EVENT_NUMBER event_number)\r
809 {\r
810         DAT_EVENT *event_ptr;\r
811 \r
812         event_ptr = dapli_evd_get_event(evd_ptr);\r
813         if (NULL == event_ptr) {\r
814                 dapli_evd_post_overflow_event(evd_ptr->header.owner_ia->\r
815                                               async_error_evd, evd_ptr);\r
816         } else {\r
817                 event_ptr->evd_handle = (DAT_EVD_HANDLE) evd_ptr;\r
818                 event_ptr->event_number = event_number;\r
819         }\r
820 \r
821         return event_ptr;\r
822 }\r
823 \r
824 DAT_RETURN\r
825 dapls_evd_post_cr_arrival_event(IN DAPL_EVD * evd_ptr,\r
826                                 IN DAT_EVENT_NUMBER event_number,\r
827                                 IN DAT_SP_HANDLE sp_handle,\r
828                                 DAT_IA_ADDRESS_PTR ia_address_ptr,\r
829                                 DAT_CONN_QUAL conn_qual,\r
830                                 DAT_CR_HANDLE cr_handle)\r
831 {\r
832         DAT_EVENT *event_ptr;\r
833         event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
834         /*\r
835          * Note event lock may be held on successful return\r
836          * to be released by dapli_evd_post_event(), if provider side locking\r
837          * is needed.\r
838          */\r
839 \r
840         if (event_ptr == NULL) {\r
841                 return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
842                                  DAT_RESOURCE_MEMORY);\r
843         }\r
844 \r
845         event_ptr->event_data.cr_arrival_event_data.sp_handle = sp_handle;\r
846         event_ptr->event_data.cr_arrival_event_data.local_ia_address_ptr\r
847             = ia_address_ptr;\r
848         event_ptr->event_data.cr_arrival_event_data.conn_qual = conn_qual;\r
849         event_ptr->event_data.cr_arrival_event_data.cr_handle = cr_handle;\r
850 \r
851         dapli_evd_post_event(evd_ptr, event_ptr);\r
852 \r
853         return DAT_SUCCESS;\r
854 }\r
855 \r
856 DAT_RETURN\r
857 dapls_evd_post_connection_event(IN DAPL_EVD * evd_ptr,\r
858                                 IN DAT_EVENT_NUMBER event_number,\r
859                                 IN DAT_EP_HANDLE ep_handle,\r
860                                 IN DAT_COUNT private_data_size,\r
861                                 IN DAT_PVOID private_data)\r
862 {\r
863         DAT_EVENT *event_ptr;\r
864         event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
865         /*\r
866          * Note event lock may be held on successful return\r
867          * to be released by dapli_evd_post_event(), if provider side locking\r
868          * is needed.\r
869          */\r
870 \r
871         if (event_ptr == NULL) {\r
872                 return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
873                                  DAT_RESOURCE_MEMORY);\r
874         }\r
875 \r
876         event_ptr->event_data.connect_event_data.ep_handle = ep_handle;\r
877         event_ptr->event_data.connect_event_data.private_data_size\r
878             = private_data_size;\r
879         event_ptr->event_data.connect_event_data.private_data = private_data;\r
880 \r
881         dapli_evd_post_event(evd_ptr, event_ptr);\r
882 \r
883         return DAT_SUCCESS;\r
884 }\r
885 \r
886 DAT_RETURN\r
887 dapls_evd_post_async_error_event(IN DAPL_EVD * evd_ptr,\r
888                                  IN DAT_EVENT_NUMBER event_number,\r
889                                  IN DAT_IA_HANDLE ia_handle)\r
890 {\r
891         DAT_EVENT *event_ptr;\r
892         event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
893         /*\r
894          * Note event lock may be held on successful return\r
895          * to be released by dapli_evd_post_event(), if provider side locking\r
896          * is needed.\r
897          */\r
898         dapl_log(DAPL_DBG_TYPE_WARN,\r
899                  " WARNING: async event - %s evd=%p/n",\r
900                  dapl_event_str(event_number), evd_ptr);\r
901 \r
902         if (event_ptr == NULL) {\r
903                 return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
904                                  DAT_RESOURCE_MEMORY);\r
905         }\r
906 \r
907         event_ptr->event_data.asynch_error_event_data.dat_handle =\r
908             (DAT_HANDLE) ia_handle;\r
909 \r
910         dapli_evd_post_event(evd_ptr, event_ptr);\r
911 \r
912         return DAT_SUCCESS;\r
913 }\r
914 \r
915 DAT_RETURN\r
916 dapls_evd_post_software_event(IN DAPL_EVD * evd_ptr,\r
917                               IN DAT_EVENT_NUMBER event_number,\r
918                               IN DAT_PVOID pointer)\r
919 {\r
920         DAT_EVENT *event_ptr;\r
921         event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
922         /*\r
923          * Note event lock may be held on successful return\r
924          * to be released by dapli_evd_post_event(), if provider side locking\r
925          * is needed.\r
926          */\r
927 \r
928         if (event_ptr == NULL) {\r
929                 return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
930                                  DAT_RESOURCE_MEMORY);\r
931         }\r
932 \r
933         event_ptr->event_data.software_event_data.pointer = pointer;\r
934 \r
935         dapli_evd_post_event(evd_ptr, event_ptr);\r
936 \r
937         return DAT_SUCCESS;\r
938 }\r
939 \r
940 /*\r
941  * dapls_evd_post_generic_event\r
942  *\r
943  * Post a generic event type. Not used by all providers\r
944  *\r
945  * Input:\r
946  *      evd_ptr\r
947  *      event_number\r
948  *      data\r
949  *\r
950  * Output:\r
951  *      none\r
952  *\r
953  * Returns:\r
954  *      DAT_SUCCESS\r
955  *\r
956  */\r
957 DAT_RETURN\r
958 dapls_evd_post_generic_event(IN DAPL_EVD * evd_ptr,\r
959                              IN DAT_EVENT_NUMBER event_number,\r
960                              IN DAT_EVENT_DATA * data)\r
961 {\r
962         DAT_EVENT *event_ptr;\r
963 \r
964         event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
965         /*\r
966          * Note event lock may be held on successful return\r
967          * to be released by dapli_evd_post_event(), if provider side locking\r
968          * is needed.\r
969          */\r
970 \r
971         if (event_ptr == NULL) {\r
972                 return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
973                                  DAT_RESOURCE_MEMORY);\r
974         }\r
975 \r
976         event_ptr->event_data = *data;\r
977 \r
978         dapli_evd_post_event(evd_ptr, event_ptr);\r
979 \r
980         return DAT_SUCCESS;\r
981 }\r
982 \r
983 #ifdef DAT_EXTENSIONS\r
/*
 * dapls_evd_post_cr_event_ext
 *
 * Extended connection-request arrival handling: builds a CR (and, for
 * DAT_PSP_PROVIDER_FLAG service points, a new EP), links the CR onto
 * the service point, then posts a CR arrival event that also carries
 * 64 bytes of extension data on the SP's EVD.
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_CONN_QUAL_UNAVAILABLE        (SP is not listening; connection
 *                                        is rejected here)
 *      DAT_INSUFFICIENT_RESOURCES       (CR/EP/event allocation failure)
 */
DAT_RETURN
dapls_evd_post_cr_event_ext(IN DAPL_SP * sp_ptr,
                            IN DAT_EVENT_NUMBER event_number,
                            IN dp_ib_cm_handle_t ib_cm_handle,
                            IN DAT_COUNT p_size,
                            IN DAT_PVOID p_data, IN DAT_PVOID ext_data)
{
        DAPL_CR *cr_ptr;
        DAPL_EP *ep_ptr;
        DAT_EVENT *event_ptr;
        DAT_SP_HANDLE sp_handle;

        /* Check listening state under the SP lock to avoid racing teardown. */
        dapl_os_lock(&sp_ptr->header.lock);
        if (sp_ptr->listening == DAT_FALSE) {
                dapl_os_unlock(&sp_ptr->header.lock);
                dapl_dbg_log(DAPL_DBG_TYPE_CM,
                             "---> post_cr_event_ext: conn event on down SP\n");
                /* SP is down: actively reject the incoming connection. */
                (void)dapls_ib_reject_connection(ib_cm_handle,
                                                 DAT_CONNECTION_EVENT_UNREACHABLE,
                                                 0, NULL);
                return DAT_CONN_QUAL_UNAVAILABLE;
        }

        /*
         * RSP connections only allow a single connection. Close
         * it down NOW so we reject any further connections.
         */
        if (sp_ptr->header.handle_type == DAT_HANDLE_TYPE_RSP)
                sp_ptr->listening = DAT_FALSE;

        dapl_os_unlock(&sp_ptr->header.lock);

        /* allocate new connect request */
        cr_ptr = dapls_cr_alloc(sp_ptr->header.owner_ia);
        if (cr_ptr == NULL)
                return DAT_INSUFFICIENT_RESOURCES;

        /* Set up the CR */
        cr_ptr->sp_ptr = sp_ptr;        /* maintain sp_ptr in case of reject */
        cr_ptr->param.remote_port_qual = 0;
        cr_ptr->ib_cm_handle = ib_cm_handle;
        cr_ptr->param.remote_ia_address_ptr =
            (DAT_IA_ADDRESS_PTR) & cr_ptr->remote_ia_address;

        /*
         * Copy the remote address and private data out of the private_data
         */
        cr_ptr->param.private_data = cr_ptr->private_data;
        cr_ptr->param.private_data_size = p_size;
        if (p_size)
                dapl_os_memcpy(cr_ptr->private_data, p_data, p_size);

        /* EP will be NULL unless RSP service point */
        ep_ptr = (DAPL_EP *) sp_ptr->ep_handle;

        if (sp_ptr->psp_flags == DAT_PSP_PROVIDER_FLAG) {
                DAPL_IA *ia_ptr;
                /*
                 * Never true for RSP connections
                 *
                 * Create an EP for the user. If we can't allocate an
                 * EP we are out of resources and need to tell the
                 * requestor that we cant help them.
                 */
                ia_ptr = sp_ptr->header.owner_ia;
                ep_ptr = dapl_ep_alloc(ia_ptr, NULL);
                if (ep_ptr == NULL) {
                        dapls_cr_free(cr_ptr);
                        /* Invoking function will call dapls_ib_cm_reject() */
                        return DAT_INSUFFICIENT_RESOURCES;
                }
                ep_ptr->param.ia_handle = ia_ptr;
                ep_ptr->param.local_ia_address_ptr =
                    (DAT_IA_ADDRESS_PTR) & ia_ptr->hca_ptr->hca_address;

                /* Link the EP onto the IA */
                dapl_ia_link_ep(ia_ptr, ep_ptr);
        }

        cr_ptr->param.local_ep_handle = ep_ptr;

        if (ep_ptr != NULL) {
                /* Assign valid EP fields: RSP and PSP_PROVIDER_FLAG only */
                if (sp_ptr->psp_flags == DAT_PSP_PROVIDER_FLAG) {
                        ep_ptr->param.ep_state =
                            DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING;
                } else {
                        /* RSP */
                        dapl_os_assert(sp_ptr->header.handle_type ==
                                       DAT_HANDLE_TYPE_RSP);
                        ep_ptr->param.ep_state =
                            DAT_EP_STATE_PASSIVE_CONNECTION_PENDING;
                }
                ep_ptr->cm_handle = ib_cm_handle;
        }

        /* link the CR onto the SP so we can pick it up later */
        dapl_sp_link_cr(sp_ptr, cr_ptr);

        /* assign sp_ptr to union to avoid typecast errors from some compilers */
        sp_handle.psp_handle = (DAT_PSP_HANDLE) sp_ptr;

        /* Post the event.  */

        /*
         * Note event lock may be held on successful return
         * to be released by dapli_evd_post_event(), if provider side locking
         * is needed.
         */
        event_ptr = dapli_evd_get_and_init_event(sp_ptr->evd_handle,
                                                 event_number);
        /*
         * NOTE(review): on this failure path the CR stays linked on the SP
         * and any provider-allocated EP is not torn down — looks like a
         * resource leak; confirm expected cleanup against callers.
         */
        if (event_ptr == NULL)
                return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,
                                 DAT_RESOURCE_MEMORY);

        event_ptr->event_data.cr_arrival_event_data.sp_handle = sp_handle;
        event_ptr->event_data.cr_arrival_event_data.local_ia_address_ptr =
            (DAT_IA_ADDRESS_PTR) & sp_ptr->header.owner_ia->hca_ptr->
            hca_address;
        event_ptr->event_data.cr_arrival_event_data.conn_qual =
            sp_ptr->conn_qual;
        event_ptr->event_data.cr_arrival_event_data.cr_handle =
            (DAT_HANDLE) cr_ptr;

        /* Extension payload is assumed to be 64 bytes — TODO confirm. */
        dapl_os_memcpy(&event_ptr->event_extension_data[0], ext_data, 64);

        dapli_evd_post_event(sp_ptr->evd_handle, event_ptr);

        return DAT_SUCCESS;
}
1114 \r
1115 DAT_RETURN\r
1116 dapls_evd_post_connection_event_ext(IN DAPL_EVD * evd_ptr,\r
1117                                     IN DAT_EVENT_NUMBER event_number,\r
1118                                     IN DAT_EP_HANDLE ep_handle,\r
1119                                     IN DAT_COUNT private_data_size,\r
1120                                     IN DAT_PVOID private_data,\r
1121                                     IN DAT_PVOID ext_data)\r
1122 {\r
1123         DAT_EVENT *event_ptr;\r
1124         event_ptr = dapli_evd_get_and_init_event(evd_ptr, event_number);\r
1125         /*\r
1126          * Note event lock may be held on successful return\r
1127          * to be released by dapli_evd_post_event(), if provider side locking\r
1128          * is needed.\r
1129          */\r
1130         if (event_ptr == NULL)\r
1131                 return DAT_ERROR(DAT_INSUFFICIENT_RESOURCES,\r
1132                                  DAT_RESOURCE_MEMORY);\r
1133 \r
1134         event_ptr->event_data.connect_event_data.ep_handle = ep_handle;\r
1135         event_ptr->event_data.connect_event_data.private_data_size\r
1136             = private_data_size;\r
1137         event_ptr->event_data.connect_event_data.private_data = private_data;\r
1138 \r
1139         dapl_os_memcpy(&event_ptr->event_extension_data[0], ext_data, 64);\r
1140 \r
1141         dapli_evd_post_event(evd_ptr, event_ptr);\r
1142 \r
1143         return DAT_SUCCESS;\r
1144 }\r
1145 #endif\r
1146 \r
/*
 * dapli_evd_cqe_to_event
 *
 * Convert a CQE into an event structure.
 *
 * Input:
 *      evd_ptr
 *      cqe_ptr
 *
 * Output:
 *      event_ptr
 *
 * Returns:
 *      none
 *
 */
static void
dapli_evd_cqe_to_event(IN DAPL_EVD * evd_ptr,
                       IN void *cqe_ptr, OUT DAT_EVENT * event_ptr)
{
        DAPL_EP *ep_ptr;
        DAPL_COOKIE *cookie;
        DAT_DTO_COMPLETION_STATUS dto_status;
        DAPL_COOKIE_BUFFER *buffer;

        /*
         * All that can be relied on if the status is bad is the status
         * and WRID.
         */
        dto_status = dapls_ib_get_dto_status(cqe_ptr);

        /* The work request ID carries the DAPL_COOKIE posted with the op. */
        cookie = (DAPL_COOKIE *) (uintptr_t) DAPL_GET_CQE_WRID(cqe_ptr);
        dapl_os_assert((NULL != cookie));

        ep_ptr = cookie->ep;
        dapl_os_assert((NULL != ep_ptr));
        /* Magic check guards against completions arriving for a torn-down EP. */
        if (ep_ptr->header.magic != DAPL_MAGIC_EP) {
                /* ep may have been freed, just return */
                return;
        }

        dapls_io_trc_update_completion(ep_ptr, cookie, dto_status);

        event_ptr->evd_handle = (DAT_EVD_HANDLE) evd_ptr;

        switch (cookie->type) {
        case DAPL_COOKIE_TYPE_DTO:
                {
#ifdef DAT_EXTENSIONS
                        /* Extended via request post or message receive */
                        if ((cookie->val.dto.type == DAPL_DTO_TYPE_EXTENSION) ||
                            (cookie->val.dto.type == DAPL_DTO_TYPE_RECV &&
                             DAPL_GET_CQE_OPTYPE(cqe_ptr) != OP_RECEIVE)) {
                                dapls_cqe_to_event_extension(ep_ptr, cookie,
                                                             cqe_ptr,
                                                             event_ptr);
                                /* Return the cookie to the buffer it came from. */
                                if (cookie->val.dto.type == DAPL_DTO_TYPE_RECV)
                                        dapls_cookie_dealloc(&ep_ptr->
                                                             recv_buffer,
                                                             cookie);
                                else
                                        dapls_cookie_dealloc(&ep_ptr->
                                                             req_buffer,
                                                             cookie);
                                break;
                        }
#endif

                        /* Pick the cookie buffer matching the DTO direction. */
                        if (DAPL_DTO_TYPE_RECV == cookie->val.dto.type)
                                buffer = &ep_ptr->recv_buffer;
                        else
                                buffer = &ep_ptr->req_buffer;

                        event_ptr->event_number = DAT_DTO_COMPLETION_EVENT;
                        event_ptr->event_data.dto_completion_event_data.
                            ep_handle = cookie->ep;
                        event_ptr->event_data.dto_completion_event_data.
                            user_cookie = cookie->val.dto.cookie;
                        event_ptr->event_data.dto_completion_event_data.status =
                            dto_status;

                        if (cookie->val.dto.type == DAPL_DTO_TYPE_SEND ||
                            cookie->val.dto.type == DAPL_DTO_TYPE_RDMA_WRITE) {
                                /* Get size from DTO; CQE value may be off.  */
                                event_ptr->event_data.dto_completion_event_data.
                                    transfered_length = cookie->val.dto.size;
                        } else {
                                event_ptr->event_data.dto_completion_event_data.
                                    transfered_length =
                                    DAPL_GET_CQE_BYTESNUM(cqe_ptr);
                        }

                        dapls_cookie_dealloc(buffer, cookie);
                        break;
                }

        case DAPL_COOKIE_TYPE_RMR:
                {
                        /* Memory-window bind completion. */
                        event_ptr->event_number = DAT_RMR_BIND_COMPLETION_EVENT;

                        event_ptr->event_data.rmr_completion_event_data.
                            rmr_handle = cookie->val.rmr.rmr;
                        event_ptr->event_data.rmr_completion_event_data.
                            user_cookie = cookie->val.rmr.cookie;
                        if (dto_status == DAT_DTO_SUCCESS) {
                                event_ptr->event_data.rmr_completion_event_data.
                                    status = DAT_RMR_BIND_SUCCESS;
                                dapl_os_assert((DAPL_GET_CQE_OPTYPE(cqe_ptr)) ==
                                               OP_BIND_MW);
                        } else {
                                dapl_dbg_log(DAPL_DBG_TYPE_DTO_COMP_ERR,
                                             " MW bind completion ERROR: %d: op %#x ep: %p\n",
                                             dto_status,
                                             DAPL_GET_CQE_OPTYPE(cqe_ptr),
                                             ep_ptr);
                                event_ptr->event_data.rmr_completion_event_data.
                                    status = DAT_RMR_OPERATION_FAILED;
                                /* Failed bind: drop the LMR reference taken at post. */
                                dapl_os_atomic_dec(&cookie->val.rmr.rmr->lmr->
                                                   lmr_ref_count);
                        }

                        dapls_cookie_dealloc(&ep_ptr->req_buffer, cookie);
                        break;
                }
        default:
                {
                        dapl_os_assert(!"Invalid Operation type");
                        break;
                }
        }                       /* end switch */

        /*
         * Most error DTO ops result in disconnecting the EP. See
         * IBTA Vol 1.1, Chapter 10,Table 68, for expected effect on
         * state.
         */
        if ((dto_status != DAT_DTO_SUCCESS) &&
            (dto_status != DAT_DTO_ERR_FLUSHED)) {
                DAPL_EVD *evd_ptr;

                /*
                 * If we are connected, generate disconnect and generate an
                 * event. We may be racing with other disconnect ops, so we
                 * need to check. We may also be racing CM connection events,
                 * requiring us to check for connection pending states too.
                 */
                dapl_os_lock(&ep_ptr->header.lock);
                if (ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED ||
                    ep_ptr->param.ep_state ==
                    DAT_EP_STATE_ACTIVE_CONNECTION_PENDING
                    || ep_ptr->param.ep_state ==
                    DAT_EP_STATE_PASSIVE_CONNECTION_PENDING
                    || ep_ptr->param.ep_state ==
                    DAT_EP_STATE_COMPLETION_PENDING)
                {
                        /* Transition under the lock so only one path disconnects. */
                        ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
                        dapl_os_unlock(&ep_ptr->header.lock);
                        dapls_io_trc_dump(ep_ptr, cqe_ptr, dto_status);

                        /* Let the other side know we have disconnected */
                        (void)dapls_ib_disconnect(ep_ptr,
                                                  DAT_CLOSE_ABRUPT_FLAG);

                        /* ... and clean up the local side */
                        evd_ptr = (DAPL_EVD *) ep_ptr->param.connect_evd_handle;
                        if (evd_ptr != NULL) {
                                dapls_evd_post_connection_event(evd_ptr,
                                                                DAT_CONNECTION_EVENT_BROKEN,
                                                                (DAT_HANDLE)
                                                                ep_ptr, 0, 0);
                        }
                } else {
                        dapl_os_unlock(&ep_ptr->header.lock);
                }

                dapl_log(DAPL_DBG_TYPE_ERR,
                         "DTO completion ERR: status %d, op %s, vendor_err 0x%x - %s\n",
                         DAPL_GET_CQE_STATUS(cqe_ptr),
                         DAPL_GET_DTO_OP_STR(cookie->val.dto.type),
                         DAPL_GET_CQE_VENDOR_ERR(cqe_ptr),
                         inet_ntoa(((struct sockaddr_in *)&ep_ptr->
                                    remote_ia_address)->sin_addr));
        }
}
1331 \r
1332 /*\r
1333  * dapls_evd_copy_cq\r
1334  *\r
1335  * Copy all entries on a CQ associated with the EVD onto that EVD\r
1336  * Up to caller to handle races, if any.  Note that no EVD waiters will\r
1337  * be awoken by this copy.\r
1338  *\r
1339  * Input:\r
1340  *      evd_ptr\r
1341  *\r
1342  * Output:\r
1343  *      None\r
1344  *\r
1345  * Returns:\r
1346  *      none\r
1347  *\r
1348  */\r
1349 void dapls_evd_copy_cq(DAPL_EVD * evd_ptr)\r
1350 {\r
1351         ib_work_completion_t cur_cqe;\r
1352         DAT_RETURN dat_status;\r
1353         DAT_EVENT *event;\r
1354 \r
1355         if (evd_ptr->ib_cq_handle == IB_INVALID_HANDLE) {\r
1356                 /* Nothing to do if no CQ.  */\r
1357                 return;\r
1358         }\r
1359 \r
1360         while (1) {\r
1361                 dat_status =\r
1362                     dapls_ib_completion_poll(evd_ptr->header.owner_ia->hca_ptr,\r
1363                                              evd_ptr, &cur_cqe);\r
1364 \r
1365                 if (dat_status != DAT_SUCCESS) {\r
1366                         break;\r
1367                 }\r
1368 \r
1369                 /* For debugging.  */\r
1370                 dapli_evd_eh_print_cqe(&cur_cqe);\r
1371 \r
1372                 /*\r
1373                  * Can use DAT_DTO_COMPLETION_EVENT because dapli_evd_cqe_to_event\r
1374                  * will overwrite.\r
1375                  */\r
1376 \r
1377                 event =\r
1378                     dapli_evd_get_and_init_event(evd_ptr,\r
1379                                                  DAT_DTO_COMPLETION_EVENT);\r
1380                 if (event == NULL) {\r
1381                         /* We've already attempted the overflow post; return.  */\r
1382                         return;\r
1383                 }\r
1384 \r
1385                 dapli_evd_cqe_to_event(evd_ptr, &cur_cqe, event);\r
1386 \r
1387                 dapli_evd_post_event_nosignal(evd_ptr, event);\r
1388         }\r
1389 \r
1390         if (DAT_GET_TYPE(dat_status) != DAT_QUEUE_EMPTY) {\r
1391                 dapl_dbg_log(DAPL_DBG_TYPE_EVD,\r
1392                              "dapls_evd_copy_cq: dapls_ib_completion_poll returned 0x%x\n",\r
1393                              dat_status);\r
1394                 dapl_os_assert(!"Bad return from dapls_ib_completion_poll");\r
1395         }\r
1396 }\r
1397 \r
1398 /*\r
1399  * dapls_evd_cq_poll_to_event\r
1400  *\r
1401  * Attempt to dequeue a single CQE from a CQ and turn it into\r
1402  * an event.\r
1403  *\r
1404  * Input:\r
1405  *      evd_ptr\r
1406  *\r
1407  * Output:\r
1408  *      event\r
1409  *\r
1410  * Returns:\r
1411  *      Status of operation\r
1412  *\r
1413  */\r
1414 DAT_RETURN\r
1415 dapls_evd_cq_poll_to_event(IN DAPL_EVD * evd_ptr, OUT DAT_EVENT * event)\r
1416 {\r
1417         DAT_RETURN dat_status;\r
1418         ib_work_completion_t cur_cqe;\r
1419 \r
1420         dat_status = dapls_ib_completion_poll(evd_ptr->header.owner_ia->hca_ptr,\r
1421                                               evd_ptr, &cur_cqe);\r
1422         if (dat_status == DAT_SUCCESS) {\r
1423                 /* For debugging.  */\r
1424                 dapli_evd_eh_print_cqe(&cur_cqe);\r
1425 \r
1426                 dapli_evd_cqe_to_event(evd_ptr, &cur_cqe, event);\r
1427         }\r
1428 \r
1429         return dat_status;\r
1430 }\r
1431 \r
1432 #ifdef DAPL_DBG_IO_TRC\r
1433 /*\r
1434  * Update I/O completions in the I/O trace buffer. I/O is posted to\r
1435  * the buffer, then we find it here using the cookie and mark it\r
1436  * completed with the completion status\r
1437  */\r
1438 void\r
1439 dapls_io_trc_update_completion(DAPL_EP * ep_ptr,\r
1440                                DAPL_COOKIE * cookie,\r
1441                                DAT_DTO_COMPLETION_STATUS dto_status)\r
1442 {\r
1443         int i;\r
1444         static unsigned int c_cnt = 1;\r
1445 \r
1446         for (i = 0; i < DBG_IO_TRC_QLEN; i++) {\r
1447                 if (ep_ptr->ibt_base[i].cookie == cookie) {\r
1448                         ep_ptr->ibt_base[i].status = dto_status;\r
1449                         ep_ptr->ibt_base[i].done = c_cnt++;\r
1450                 }\r
1451         }\r
1452 }\r
1453 \r
/*
 * Dump the I/O trace buffers
 *
 * Prints a summary of the failing completion, then (once per EP —
 * gated by ibt_dumped) drains the EP's trace ring buffer, printing
 * each tracked I/O and up to three of its local iov segments.
 */
void
dapls_io_trc_dump(DAPL_EP * ep_ptr,
                  void *cqe_ptr, DAT_DTO_COMPLETION_STATUS dto_status)
{
        struct io_buf_track *ibt;
        int i;
        int cnt;

        /* Summarize the completion that triggered the disconnect. */
        dapl_os_printf("DISCONNECTING: dto_status     = %x\n", dto_status);
        dapl_os_printf("               OpType        = %x\n",
                       DAPL_GET_CQE_OPTYPE(cqe_ptr));
        dapl_os_printf("               Bytes         = %x\n",
                       DAPL_GET_CQE_BYTESNUM(cqe_ptr));
        dapl_os_printf("               WRID (cookie) = %llx\n",
                       DAPL_GET_CQE_WRID(cqe_ptr));

        /* Dump the trace ring at most once per EP. */
        if (ep_ptr->ibt_dumped == 0) {

                dapl_os_printf("EP %p (qpn %d) I/O trace buffer\n",
                               ep_ptr, ep_ptr->qpn);

                ep_ptr->ibt_dumped = 1;
                ibt =
                    (struct io_buf_track *)dapls_rbuf_remove(&ep_ptr->
                                                             ibt_queue);
                /* cnt bounds the loop to the ring capacity as a safety net. */
                cnt = DBG_IO_TRC_QLEN;
                while (ibt != NULL && cnt > 0) {
                        /* "WRK" = still outstanding, "DON" = completed. */
                        dapl_os_printf
                            ("%2d. %3s (%2d, %d) OP: %x cookie %p wqe %p rmv_target_addr %llx rmv_rmr_context %x\n",
                             cnt, ibt->done == 0 ? "WRK" : "DON", ibt->status,
                             ibt->done, ibt->op_type, ibt->cookie, ibt->wqe,
                             ibt->remote_iov.target_address,
                             ibt->remote_iov.rmr_context);
                        /* Print up to three non-empty local iov segments. */
                        for (i = 0; i < 3; i++) {
                                if (ibt->iov[i].segment_length != 0) {
                                        dapl_os_printf
                                            ("     (%4llx, %8x, %8llx)\n",
                                             ibt->iov[i].segment_length,
                                             ibt->iov[i].lmr_context,
                                             ibt->iov[i].virtual_address);
                                }
                        }
                        ibt =
                            (struct io_buf_track *)dapls_rbuf_remove(&ep_ptr->
                                                                     ibt_queue);
                        cnt--;
                }
        }
}
1506 #endif                          /* DAPL_DBG_IO_TRC */\r
1507 \r
1508 /*\r
1509  * Local variables:\r
1510  *  c-indent-level: 4\r
1511  *  c-basic-offset: 4\r
1512  *  tab-width: 8\r
1513  * End:\r
1514  */\r