 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * Must be packed because start is 64 bits but only aligned to 32 bits.
struct mlx4_eq_context {
	__be32			mtt_base_addr_l;
	__be32			consumer_index;
	__be32			producer_index;
#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 << 8)
#define MLX4_EQ_STATE_FIRED	   (10 << 8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))
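
/*
 * mlx4_eq_int() - drain an event queue at DPC level.
 *
 * Polls EQEs with next_eqe_sw() and dispatches each one by type to the
 * CQ/QP/SRQ/command/port-change handlers below.  The consumer index is
 * pushed to the HCA every MLX4_NUM_SPARE_EQE processed entries, and the
 * loop bails out once cDpcMaxTime has elapsed so that a busy EQ cannot
 * monopolize the processor.
 */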
#pragma warning(disable:4706)
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
	static const uint32_t cDpcMaxTime = 10000;	// max time to spend in the event-processing loop
	uint64_t start = cl_get_time_stamp();

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,

		case MLX4_EVENT_TYPE_CMD:
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
					    be32_to_cpu(eqe->event.port_change.port) >> 28);

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */

		if (cl_get_time_stamp() - start > cDpcMaxTime) {
			break;	// yield so that other DPCs can run as well
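
/*
 * mlx4_dpc() - deferred procedure call routine queued by mlx4_interrupt().
 * Serializes EQ processing under eq->lock and hands the work to mlx4_eq_int().
 */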
#pragma warning(disable:4706)

static void mlx4_dpc(PRKDPC dpc, PVOID ctx, PVOID arg1, PVOID arg2)
	struct mlx4_eq *eq = ctx;

	UNREFERENCED_PARAMETER(dpc);
	UNREFERENCED_PARAMETER(arg1);
	UNREFERENCED_PARAMETER(arg2);

	spin_lock_dpc(&eq->lock);
	mlx4_eq_int(eq->dev, eq);
	spin_unlock_dpc(&eq->lock);
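
/*
 * mlx4_interrupt() - legacy (INTx) interrupt service routine.
 *
 * Clears the interrupt via the clr_int register, queues a DPC for every
 * built-in EQ that has a pending EQE (re-arming the EQ otherwise), and
 * then lets any extra EQs added with mlx4_add_eq() run their private ISRs.
 * Returns (BOOLEAN)work, i.e. whether any EQ had work to do.
 */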
static BOOLEAN mlx4_interrupt(
	IN struct _KINTERRUPT *Interrupt,
	IN PVOID ServiceContext
	struct mlx4_dev *dev = ServiceContext;
	struct mlx4_priv *priv = mlx4_priv(dev);

	UNUSED_PARAM(Interrupt);

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < MLX4_NUM_EQ; ++i) {
		if (next_eqe_sw(&priv->eq_table.eq[i])) {
			/* Another interrupt may arrive immediately after the writel above.
			   If it is delivered to another processor, mlx4_interrupt will be
			   called there and may try to queue the same DPC, so we protect
			   KeInsertQueueDpc with the dpc_lock below. */
			while (InterlockedCompareExchange(&dev->pdev->dpc_lock, 1, 0));

			KeInsertQueueDpc(&priv->eq_table.eq[i].dpc, NULL, NULL);
			InterlockedCompareExchange(&dev->pdev->dpc_lock, 0, 1);

			/* Re-arm the EQ for the case where the interrupt arrives before
			   the EQE and we didn't schedule the DPC. */
			eq_set_ci(&priv->eq_table.eq[i], 1);

	for (i = MLX4_NUM_EQ; i <= priv->eq_table.max_extra_eqs; ++i) {
		if (priv->eq_table.eq[i].isr) {
			if (next_eqe_sw(&priv->eq_table.eq[i])) {
				ret = priv->eq_table.eq[i].isr(priv->eq_table.eq[i].ctx);

			eq_set_ci(&priv->eq_table.eq[i], 1);

	return (BOOLEAN)work;
#ifdef CONFIG_PCI_MSI

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
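
/*
 * Thin wrappers around the MAP_EQ, SW2HW_EQ and HW2SW_EQ firmware commands:
 * attach/detach an event mask to an EQ and pass EQ context ownership to and
 * from the hardware.
 */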
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
	return mlx4_cmd(dev, mailbox->dma.da, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A);

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
	return mlx4_cmd_box(dev, 0, mailbox->dma.da, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A);
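
/*
 * mlx4_get_eq_uar() - return a pointer to the doorbell register of @eq.
 * The UAR pages (BAR 2, one page per four EQs) are mapped lazily and cached
 * in priv->eq_table.uar_map[]; each EQ's doorbell lives at offset
 * 0x800 + 8 * (eqn % 4) within its page.
 */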
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
	struct mlx4_priv *priv = mlx4_priv(dev);

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
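
/*
 * mlx4_create_eq() - allocate and bring up one event queue.
 * Rounds the requested size up to a power of two, allocates the EQE pages
 * with dma_alloc_coherent(), takes an EQN from the bitmap, writes the MTT
 * entries and finally hands the EQ context to firmware with SW2HW_EQ.
 */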
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	u64 *dma_list = NULL;

	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = (int)(NEXT_PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE);

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);

	mailbox = mlx4_alloc_cmd_mailbox(dev);

	eq_context = (struct mlx4_eq_context *)mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
		goto err_out_free_eq;

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = (u8)ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = (u8)(mtt_addr >> 32);
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;

	mlx4_free_cmd_mailbox(dev, mailbox);

	mlx4_mtt_cleanup(dev, &eq->mtt);

	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

	kfree(eq->page_list);
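
/*
 * mlx4_free_eq() - tear down an EQ created by mlx4_create_eq(): reclaim it
 * from firmware with HW2SW_EQ (optionally dumping the returned EQ context),
 * clean up the MTT, free the EQE pages and return the EQN to the bitmap.
 */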
static void mlx4_free_eq(struct mlx4_dev *dev,
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int npages = (int)(NEXT_PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE);

	mailbox = mlx4_alloc_cmd_mailbox(dev);

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
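
/*
 * mlx4_free_irqs() - release the shared interrupt object and, when MSI-X
 * is compiled in, every per-EQ vector that was successfully requested.
 */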
static void mlx4_free_irqs(struct mlx4_dev *dev)
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;

	if (eq_table->have_irq)
		free_irq(dev->pdev->int_obj);

#ifdef CONFIG_PCI_MSI
	for (i = 0; i < MLX4_NUM_EQ; ++i)
		if (eq_table->eq[i].have_irq)
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
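
/*
 * Map and unmap the interrupt-clear register whose BAR and offset are
 * reported by firmware (priv->fw.clr_int_bar / priv->fw.clr_int_base).
 */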
static int mlx4_map_clr_int(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base, MLX4_CLR_INT_SIZE);
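
/*
 * mlx4_map_eq_icm() - back the EQ context table with ICM: allocate one page,
 * DMA-map it and hand it to firmware with MAP_ICM at @icm_virt.
 * mlx4_unmap_eq_icm() below undoes all three steps.
 */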
int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
	struct mlx4_priv *priv = mlx4_priv(dev);

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 64 bytes of context
	 * memory, or 2 KB total.
	 */
	priv->eq_table.icm_virt = icm_virt;
	priv->eq_table.icm_page = alloc_page(dev->pdev, GFP_HIGHUSER);
	if (!priv->eq_table.icm_page.da)
	priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
		__free_page(dev->pdev, priv->eq_table.icm_page);

	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma.da, icm_virt);
		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(dev->pdev, priv->eq_table.icm_page);

	mlx4_dbg(dev, "mlx4_MAP_ICM_page: dma %#I64x, icm_virt %#I64x\n",
		 priv->eq_table.icm_dma.da, icm_virt);
void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(dev->pdev, priv->eq_table.icm_page);
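
/*
 * mlx4_init_eq_table() - driver-load initialization of the EQ machinery:
 * set up the EQN bitmap, map the interrupt-clear register, create the
 * completion and async EQs, hook the interrupt (MSI-X or legacy INTx with
 * one DPC per EQ), map the async event mask and arm every EQ.
 */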
int mlx4_init_eq_table(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs);

	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
			     &priv->eq_table.eq[MLX4_EQ_COMP]);

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
			     &priv->eq_table.eq[MLX4_EQ_ASYNC]);

#ifdef CONFIG_PCI_MSI
	if (dev->flags & MLX4_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MLX4_EQ_COMP]  = DRV_NAME " (comp)",
			[MLX4_EQ_ASYNC] = DRV_NAME " (async)"

#ifdef USE_WDM_INTERRUPTS
		for (i = 0; i < MLX4_NUM_EQ; ++i) {
			err = request_irq(dev,
					  dev->pdev->int_info.u.Interrupt.Vector,
					  mlx4_dpc, mlx4_msi_x_interrupt,
					  &dev->pdev->int_obj);

			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt,
					  priv->eq_table.eq + i, eq_name[i], );

			priv->eq_table.eq[i].have_irq = 1;

#error MSI support is not implemented for WDF model

#ifdef USE_WDM_INTERRUPTS
	err = request_irq(dev,
			  dev->pdev->int_info.u.Interrupt.Vector,
			  &dev->pdev->int_obj);

	dev->pdev->dpc_lock = 0;
	for (i = 0; i < MLX4_NUM_EQ; ++i) {
		struct mlx4_eq *eq = &priv->eq_table.eq[i];
		spin_lock_init(&eq->lock);
		eq->isr = mlx4_interrupt;
		KeInitializeDpc(&eq->dpc, mlx4_dpc, eq);

	priv->eq_table.have_irq = 1;

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	for (i = 0; i < MLX4_NUM_EQ; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

#ifdef USE_WDM_INTERRUPTS
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);

	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);

	mlx4_unmap_clr_int(dev);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
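
/*
 * mlx4_add_eq() - create an extra EQ (slots MLX4_NUM_EQ ..
 * MLX4_NUM_EQ + MLX4_MAX_EXTRA_EQS - 1) with a caller-supplied ISR and
 * context; mlx4_interrupt() polls these extra EQs after the built-in ones.
 *
 * Illustrative call sequence (handler and context names are hypothetical):
 *
 *	u8 eqn;
 *	struct mlx4_eq *eq;
 *	err = mlx4_add_eq(dev, nent, 0, my_isr, my_ctx, &eqn, &eq);
 *	...
 *	mlx4_remove_eq(dev, eqn);
 */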
int mlx4_add_eq(struct mlx4_dev *dev, int nent,
		u8 intr, PISR_FUNC func, PVOID func_context,
		u8 *p_eq_num, struct mlx4_eq **p_eq)
	struct mlx4_priv *priv = mlx4_priv(dev);

	UNREFERENCED_PARAMETER(intr);

	if (mlx4_is_barred(dev))

	for (i = MLX4_NUM_EQ; i < MLX4_NUM_EQ + MLX4_MAX_EXTRA_EQS; i++) {
		if (priv->eq_table.eq[i].isr == NULL) {

	err = mlx4_create_eq(dev, nent,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
			     &priv->eq_table.eq[new_eq]);

	*p_eq = &priv->eq_table.eq[new_eq];

	priv->eq_table.eq[new_eq].isr = func;
	priv->eq_table.eq[new_eq].ctx = func_context;
	priv->eq_table.max_extra_eqs  = max(priv->eq_table.max_extra_eqs, new_eq);
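
/*
 * mlx4_remove_eq() - undo mlx4_add_eq(): detach the private ISR and context,
 * free the EQ, then release and re-request the shared interrupt object.
 */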
void mlx4_remove_eq(struct mlx4_dev *dev, u8 eq_num)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;

	priv->eq_table.eq[eq_num].isr = NULL;
	priv->eq_table.eq[eq_num].ctx = NULL;

	if (priv->eq_table.max_extra_eqs == eq_num)
		priv->eq_table.max_extra_eqs--;

	mlx4_free_eq(dev, &priv->eq_table.eq[eq_num]);

	if (eq_table->have_irq) {
		free_irq(dev->pdev->int_obj);

		err = request_irq(dev,
				  dev->pdev->int_info.u.Interrupt.Vector,
				  &dev->pdev->int_obj);
		// BUGBUG: how should the error be propagated?
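
/*
 * mlx4_cleanup_eq_table() - driver-unload counterpart of mlx4_init_eq_table():
 * unmap the async event mask, free every built-in EQ, unmap the interrupt-clear
 * register and the cached doorbell UAR pages, and release the EQN bitmap.
 */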
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

	for (i = 0; i < MLX4_NUM_EQ; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i], PAGE_SIZE);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);