2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
42 #include "complib\cl_thread.h"
44 // TODO: put into Globals
45 #ifdef CONFIG_MLX4_DEBUG
46 // "Enable debug tracing if > 0"
48 #endif /* CONFIG_MLX4_DEBUG */
52 // "attempt to use MSI-X if nonzero"
55 #else /* CONFIG_PCI_MSI */
59 #endif /* CONFIG_PCI_MSI */
/*
 * Default resource-sizing profile used by mlx4_init_hca() when no module
 * parameters override it (see process_mod_param_profile() below).  Each
 * value is a count expressed as a power of two.
 * NOTE(review): several initializer fields (e.g. num_qp, num_cq, num_mtt)
 * appear elided from this excerpt — confirm against the full source.
 */
61 static struct mlx4_profile default_profile = {
63 1 << 4, /* rdmarc_per_qp */
64 1 << 16, /* num_srq */
66 1 << 13, /* num_mcg */
67 1 << 17, /* num_mpt */
/*
 * Override fields of default_profile from the driver's global module
 * parameters (g.mod_*).  Each parameter is interpreted as a log2 value:
 * a nonzero g.mod_X replaces the default with (1 << g.mod_X).
 * NOTE(review): the guard `if (g.mod_X)` is only visible here for
 * rdmarc_per_qp; presumably each assignment below has the same guard on
 * an elided line — verify against the full source.
 */
71 static void process_mod_param_profile(void)
74 default_profile.num_qp = 1 << g.mod_num_qp;
76 if (g.mod_rdmarc_per_qp)
77 default_profile.rdmarc_per_qp = 1 << g.mod_rdmarc_per_qp;
80 default_profile.num_srq = 1 << g.mod_num_srq;
83 default_profile.num_cq = 1 << g.mod_num_cq;
86 default_profile.num_mcg = 1 << g.mod_num_mcg;
89 default_profile.num_mpt = 1 << g.mod_num_mpt;
92 default_profile.num_mtt = 1 << g.mod_num_mtt;
/*
 * PCI device-ID table of supported ConnectX (Hermon) HCAs.  The HCA()
 * macro expands to a vendor/device-ID pair plus driver_data; the BD
 * entry is matched in "livefish" (flash-recovery) mode — see
 * mlx4_init_one(), which checks id->driver_data == LIVEFISH.
 */
95 static struct pci_device_id
97 HCA(MELLANOX, SDR, HERMON),
98 HCA(MELLANOX, DDR, HERMON),
99 HCA(MELLANOX, QDR, HERMON),
100 HCA(MELLANOX, DDR_G2, HERMON),
101 HCA(MELLANOX, QDR_G2, HERMON),
102 HCA(MELLANOX, BD, LIVEFISH),
/* Number of entries in mlx4_pci_table, used by mlx4_find_pci_dev(). */
104 #define MLX4_PCI_TABLE_SIZE (sizeof(mlx4_pci_table)/sizeof(struct pci_device_id))
/*
 * Query the HCA's capabilities via the QUERY_DEV_CAP firmware command,
 * sanity-check them against what this driver/platform supports, and
 * copy them into dev->caps for use by the rest of the driver.
 * Returns 0 on success or a negative error code (error-return lines are
 * elided from this excerpt).
 */
106 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
111 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
113 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
/* The HCA's minimum page size must not exceed the kernel's PAGE_SIZE,
 * otherwise we cannot hand it suitably aligned memory. */
117 if (dev_cap->min_page_sz > PAGE_SIZE) {
118 mlx4_err(dev, "HCA minimum page size of %d bigger than "
119 "kernel PAGE_SIZE of %ld, aborting.\n",
120 dev_cap->min_page_sz, PAGE_SIZE);
123 if (dev_cap->num_ports > MLX4_MAX_PORTS) {
124 mlx4_err(dev, "HCA has %d ports, but we only support %d, "
126 dev_cap->num_ports, MLX4_MAX_PORTS);
/* BAR 2 must be large enough to hold the UAR (doorbell) region the
 * firmware reports. */
130 if (dev_cap->uar_size > (int)pci_resource_len(dev->pdev, 2)) {
131 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
132 "PCI resource 2 size of 0x%llx, aborting.\n",
134 (unsigned long long) pci_resource_len(dev->pdev, 2));
138 dev->caps.num_ports = dev_cap->num_ports;
/* Per-port capabilities; ports are 1-based throughout the driver. */
139 for (i = 1; i <= dev->caps.num_ports; ++i) {
140 dev->caps.vl_cap[i] = dev_cap->max_vl[i];
141 dev->caps.mtu_cap[i] = dev_cap->max_mtu[i];
142 dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
143 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
144 dev->caps.port_width_cap[i] = (u8)dev_cap->max_port_width[i];
/* Device-wide capabilities copied verbatim, with a few adjustments
 * (commented individually below). */
147 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
148 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
149 dev->caps.bf_reg_size = dev_cap->bf_reg_size;
150 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
151 dev->caps.max_sq_sg = dev_cap->max_sq_sg;
152 dev->caps.max_rq_sg = dev_cap->max_rq_sg;
153 dev->caps.max_wqes = dev_cap->max_qp_sz;
154 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
155 dev->caps.reserved_qps = dev_cap->reserved_qps;
156 dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
/* One RQ scatter entry is consumed internally for SRQs, hence the -1. */
157 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
158 dev->caps.reserved_srqs = dev_cap->reserved_srqs;
159 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
160 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
161 dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
163 * Subtract 1 from the limit because we need to allocate a
164 * spare CQE so the HCA HW can tell the difference between an
165 * empty CQ and a full CQ.
167 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
168 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
169 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
/* Firmware reports reserved MTTs in entries; the driver manages MTTs in
 * segments of MLX4_MTT_ENTRY_PER_SEG, so round up to whole segments. */
170 dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
171 MLX4_MTT_ENTRY_PER_SEG);
172 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
173 dev->caps.reserved_uars = dev_cap->reserved_uars;
174 dev->caps.reserved_pds = dev_cap->reserved_pds;
/* caps.mtt_entry_sz is really the MTT *segment* size (see the comment
 * in mlx4_init_icm()). */
175 dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
176 dev->caps.max_msg_sz = dev_cap->max_msg_sz;
/* Bitmask of page sizes the HCA supports: all sizes >= min_page_sz. */
177 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
178 dev->caps.flags = dev_cap->flags;
179 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
/*
 * Allocate the firmware area in ICM (priv->fw.fw_pages pages), map it to
 * the device with MAP_FA, and start the firmware with RUN_FW.
 * On any failure the ICM allocation is freed (the unwind path at the
 * bottom); intermediate goto labels are elided from this excerpt.
 */
184 static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
186 struct mlx4_priv *priv = mlx4_priv(dev);
189 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
190 GFP_HIGHUSER | __GFP_NOWARN, 0);
191 if (!priv->fw.fw_icm) {
192 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
196 err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
198 mlx4_err(dev, "MAP_FA command failed, aborting.\n");
202 err = mlx4_RUN_FW(dev);
204 mlx4_err(dev, "RUN_FW command failed, aborting.\n");
/* Error unwind: release the firmware ICM area. */
214 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
/*
 * Set up the four cMPT (context memory protection table) ICM tables —
 * one each for QPs, SRQs, CQs and EQs.  Each table lives at
 * cmpt_base + (type index * entry size << MLX4_CMPT_SHIFT).
 * On failure, tables already created are torn down in reverse order
 * (the cleanup cascade at the bottom; goto labels elided here).
 */
218 static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
221 struct mlx4_priv *priv = mlx4_priv(dev);
224 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
226 ((u64) (MLX4_CMPT_TYPE_QP *
227 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
228 cmpt_entry_sz, dev->caps.num_qps,
229 dev->caps.reserved_qps, 0, 0);
233 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
235 ((u64) (MLX4_CMPT_TYPE_SRQ *
236 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
237 cmpt_entry_sz, dev->caps.num_srqs,
238 dev->caps.reserved_srqs, 0, 0);
242 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
244 ((u64) (MLX4_CMPT_TYPE_CQ *
245 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
246 cmpt_entry_sz, dev->caps.num_cqs,
247 dev->caps.reserved_cqs, 0, 0);
/* EQ cMPT: sized for all EQs (driver EQs + firmware-reserved EQs),
 * rounded up to a power of two, and all entries marked reserved. */
251 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
253 ((u64) (MLX4_CMPT_TYPE_EQ *
254 cmpt_entry_sz) << MLX4_CMPT_SHIFT),
256 roundup_pow_of_two(MLX4_NUM_EQ +
257 dev->caps.reserved_eqs),
258 MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0);
/* Error unwind, reverse order of creation. */
265 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
268 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
271 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
/*
 * Allocate and map all ICM (InfiniHost Context Memory) tables the HCA
 * firmware needs: the aux area, the cMPT tables, EQ context, MTTs,
 * dMPTs, QP/AUXC/ALTC/RDMARC context, CQ and SRQ context, and the
 * multicast group table.  icm_size comes from mlx4_make_profile().
 * Teardown on failure is the long reverse-order cascade at the bottom
 * (goto labels elided from this excerpt).
 */
277 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
278 struct mlx4_init_hca_param *init_hca, u64 icm_size)
280 struct mlx4_priv *priv = mlx4_priv(dev);
/* Tell firmware the total ICM size; it replies with the number of aux
 * pages it needs for its own bookkeeping. */
284 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
286 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
290 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
291 (unsigned long long) icm_size >> 10,
292 (unsigned long long) aux_pages << 2);
294 priv->fw.aux_icm = mlx4_alloc_icm(dev, (int)aux_pages,
295 GFP_HIGHUSER | __GFP_NOWARN, 0);
296 if (!priv->fw.aux_icm) {
297 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
301 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
303 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
307 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
309 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
313 err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
315 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
320 * Reserved MTT entries must be aligned up to a cacheline
321 * boundary, since the FW will write to them, while the driver
322 * writes to all other MTT entries. (The variable
323 * dev->caps.mtt_entry_sz below is really the MTT segment
324 * size, not the raw entry size)
326 dev->caps.reserved_mtts =
327 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
328 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;
/* Windows-specific: warn (debug-only) if the device object's DMA
 * alignment differs from the assumed cache-line alignment. */
329 if ( dev->pdev->p_self_do->AlignmentRequirement + 1 != dma_get_cache_alignment()) {
330 mlx4_dbg(dev, "Cache-line size %d, recommended value %d.\n",
331 dev->pdev->p_self_do->AlignmentRequirement + 1,
332 dma_get_cache_alignment() );
335 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
337 dev->caps.mtt_entry_sz,
338 dev->caps.num_mtt_segs,
339 dev->caps.reserved_mtts, 1, 0);
341 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
345 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
347 dev_cap->dmpt_entry_sz,
349 dev->caps.reserved_mrws, 1, 1);
351 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
355 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
357 dev_cap->qpc_entry_sz,
359 dev->caps.reserved_qps, 0, 0);
361 mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
365 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
367 dev_cap->aux_entry_sz,
369 dev->caps.reserved_qps, 0, 0);
371 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
375 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
377 dev_cap->altc_entry_sz,
379 dev->caps.reserved_qps, 0, 0);
381 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
/* RDMARC table: entry size is scaled by the per-QP RDMA-responder
 * context shift chosen by the profile. */
385 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
386 init_hca->rdmarc_base,
387 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
389 dev->caps.reserved_qps, 0, 0);
391 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
395 err = mlx4_init_icm_table(dev, &priv->cq_table.table,
397 dev_cap->cqc_entry_sz,
399 dev->caps.reserved_cqs, 0, 0);
401 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
402 goto err_unmap_rdmarc;
405 err = mlx4_init_icm_table(dev, &priv->srq_table.table,
407 dev_cap->srq_entry_sz,
409 dev->caps.reserved_srqs, 0, 0);
411 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
416 * It's not strictly required, but for simplicity just map the
417 * whole multicast group table now. The table isn't very big
418 * and it's a lot easier than trying to track ref counts.
420 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
421 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
422 dev->caps.num_mgms + dev->caps.num_amgms,
423 dev->caps.num_mgms + dev->caps.num_amgms,
426 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
/* Error unwind: tear everything down in reverse order of creation.
 * Mirrors mlx4_free_icms() below. */
433 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
436 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
439 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
442 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
445 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
448 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
451 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
454 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
457 mlx4_unmap_eq_icm(dev);
460 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
461 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
462 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
463 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
466 mlx4_UNMAP_ICM_AUX(dev);
469 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
/*
 * Full teardown of everything mlx4_init_icm() set up, in exact reverse
 * order of creation: context tables first, then the cMPT tables, EQ
 * ICM, the aux mapping, and finally the aux ICM allocation itself.
 * Keep this list in sync with the error-unwind path in mlx4_init_icm().
 */
474 static void mlx4_free_icms(struct mlx4_dev *dev)
476 struct mlx4_priv *priv = mlx4_priv(dev);
478 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
479 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
480 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
481 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
482 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
483 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
484 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
485 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
486 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
487 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
488 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
489 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
490 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
491 mlx4_unmap_eq_icm(dev);
493 mlx4_UNMAP_ICM_AUX(dev);
494 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
/*
 * Shut the HCA down: issue CLOSE_HCA to firmware and release the
 * firmware ICM area allocated by mlx4_load_fw().
 * NOTE(review): an intermediate step (presumably mlx4_free_icms /
 * UNMAP_FA) appears elided between these lines — confirm in full source.
 */
497 static void mlx4_close_hca(struct mlx4_dev *dev)
499 mlx4_CLOSE_HCA(dev, 0);
502 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
/*
 * Bring the HCA to an initialized state: query/load/run firmware, query
 * device capabilities, build the resource profile (module-parameter
 * overrides applied first), allocate/map ICM, then issue INIT_HCA and
 * QUERY_ADAPTER.  Stashes the adapter's INTA pin and board ID.
 * Returns 0 on success; error paths unwind ICM and firmware (labels
 * elided from this excerpt).
 */
505 static int mlx4_init_hca(struct mlx4_dev *dev)
507 struct mlx4_priv *priv = mlx4_priv(dev);
508 struct mlx4_adapter adapter;
509 struct mlx4_dev_cap dev_cap;
510 struct mlx4_profile profile;
511 struct mlx4_init_hca_param init_hca;
515 err = mlx4_QUERY_FW(dev);
517 mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
521 err = mlx4_load_fw(dev);
523 mlx4_err(dev, "Failed to start FW, aborting.\n");
527 err = mlx4_dev_cap(dev, &dev_cap);
529 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
/* Apply module-parameter overrides, then size the ICM from the profile. */
533 process_mod_param_profile();
534 profile = default_profile;
536 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
/* mlx4_make_profile() returns a negative error code cast into u64. */
537 if ((long long) icm_size < 0) {
542 init_hca.log_uar_sz = (u8)ilog2(dev->caps.num_uars);
544 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
548 err = mlx4_INIT_HCA(dev, &init_hca);
550 mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
554 err = mlx4_QUERY_ADAPTER(dev, &adapter);
556 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
560 priv->eq_table.inta_pin = adapter.inta_pin;
561 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
/* Error unwind: release the firmware ICM area. */
573 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
/*
 * Initialize all driver-side resource tables on an already-initialized
 * HCA: UAR table + driver UAR + kernel doorbell mapping (kar), then PD,
 * MR, EQ tables, switch firmware commands to event-driven mode, run the
 * NOP interrupt self-test, then CQ, SRQ, QP and MCG tables.
 * Returns 0 on success; on failure everything already set up is torn
 * down in reverse order (goto labels elided from this excerpt).
 * NOTE(review): the NOP test itself (mlx4_NOP call and its guard) is on
 * elided lines; only its failure/retry messages are visible here.
 */
578 static int mlx4_setup_hca(struct mlx4_dev *dev)
580 struct mlx4_priv *priv = mlx4_priv(dev);
583 err = mlx4_init_uar_table(dev);
585 mlx4_err(dev, "Failed to initialize "
586 "user access region table, aborting.\n");
590 err = mlx4_uar_alloc(dev, &priv->driver_uar);
592 mlx4_err(dev, "Failed to allocate driver access region, "
594 goto err_uar_table_free;
/* Map the driver UAR page so the kernel can ring doorbells. */
597 priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
599 mlx4_err(dev, "Couldn't map kernel access region, "
605 err = mlx4_init_pd_table(dev);
607 mlx4_err(dev, "Failed to initialize "
608 "protection domain table, aborting.\n");
612 err = mlx4_init_mr_table(dev);
614 mlx4_err(dev, "Failed to initialize "
615 "memory region table, aborting.\n");
616 goto err_pd_table_free;
620 err = mlx4_init_eq_table(dev);
622 mlx4_err(dev, "Failed to initialize "
623 "event queue table, aborting.\n");
624 goto err_mr_table_free;
627 err = mlx4_cmd_use_events(dev);
629 mlx4_err(dev, "Failed to switch to event-driven "
630 "firmware commands, aborting.\n");
631 goto err_eq_table_free;
/* NOP interrupt test failed.  With MSI-X the caller retries without it
 * (see mlx4_init_one); with legacy INTx it is fatal. */
636 if (dev->flags & MLX4_FLAG_MSI_X) {
637 mlx4_warn(dev, "NOP command failed to generate MSI-X "
638 "interrupt IRQ %d).\n",
639 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
640 mlx4_warn(dev, "Trying again without MSI-X.\n");
642 mlx4_err(dev, "NOP command failed to generate interrupt "
643 "(IRQ %d), aborting.\n",
644 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
645 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
651 mlx4_dbg(dev, "NOP command IRQ test passed\n");
653 err = mlx4_init_cq_table(dev);
655 mlx4_err(dev, "Failed to initialize "
656 "completion queue table, aborting.\n");
660 err = mlx4_init_srq_table(dev);
662 mlx4_err(dev, "Failed to initialize "
663 "shared receive queue table, aborting.\n");
664 goto err_cq_table_free;
667 err = mlx4_init_qp_table(dev);
669 mlx4_err(dev, "Failed to initialize "
670 "queue pair table, aborting.\n");
671 goto err_srq_table_free;
674 err = mlx4_init_mcg_table(dev);
676 mlx4_err(dev, "Failed to initialize "
677 "multicast group table, aborting.\n");
678 goto err_qp_table_free;
/* Error unwind, reverse order of setup. */
684 mlx4_cleanup_qp_table(dev);
687 mlx4_cleanup_srq_table(dev);
690 mlx4_cleanup_cq_table(dev);
693 mlx4_cmd_use_polling(dev);
696 mlx4_cleanup_eq_table(dev);
699 mlx4_cleanup_mr_table(dev);
702 mlx4_cleanup_pd_table(dev);
705 iounmap(priv->kar,PAGE_SIZE);
708 mlx4_uar_free(dev, &priv->driver_uar);
711 mlx4_cleanup_uar_table(dev);
/*
 * Try to enable MSI-X with one vector per EQ.  On success, record the
 * per-EQ vectors and set MLX4_FLAG_MSI_X; on failure (or when MSI is
 * compiled out) fall back to the single legacy INTx IRQ for all EQs.
 * Best-effort: this function never fails, it just decides the mode.
 */
715 static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
717 #ifdef CONFIG_PCI_MSI
718 struct mlx4_priv *priv = mlx4_priv(dev);
719 struct msix_entry entries[MLX4_NUM_EQ];
724 for (i = 0; i < MLX4_NUM_EQ; ++i)
725 entries[i].entry = i;
727 err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
/* A positive return is the number of vectors actually available —
 * fewer than requested, so fall back to legacy interrupts. */
730 mlx4_info(dev, "Only %d MSI-X vectors available, "
731 "not using MSI-X\n", err);
735 for (i = 0; i < MLX4_NUM_EQ; ++i)
736 priv->eq_table.eq[i].irq = entries[i].vector;
738 dev->flags |= MLX4_FLAG_MSI_X;
/* Fallback: every EQ shares the device's legacy IRQ. */
743 for (i = 0; i < MLX4_NUM_EQ; ++i)
744 priv->eq_table.eq[i].irq = dev->pdev->irq;
/*
 * Linear search of mlx4_pci_table for the given vendor/device ID pair.
 * Returns a pointer to the matching entry, or (per the elided tail,
 * presumably) NULL when no entry matches — confirm in full source.
 */
752 static struct pci_device_id * mlx4_find_pci_dev(USHORT ven_id, USHORT dev_id)
754 struct pci_device_id *p_id = mlx4_pci_table;
757 // find p_id (appropriate line in mlx4_pci_table)
758 for (i = 0; i < MLX4_PCI_TABLE_SIZE; ++i, ++p_id) {
759 if (p_id->device == dev_id && p_id->vendor == ven_id)
/*
 * Per-device initialization entry point (called on device add).
 * Sequence: match the PCI ID, validate BARs, allocate mlx4_priv,
 * handle "livefish" (flash-recovery) devices specially, reset the HCA,
 * init the command interface, init the HCA, enable MSI-X, set up all
 * resource tables (retrying once without MSI-X on -EBUSY), and finally
 * register the device with the upper layers.
 * On unrecoverable init failure the device is re-registered as a
 * livefish device so firmware-burning tools can still reach it.
 * Returns 0 on success or a negative error code (elided returns).
 */
765 int mlx4_init_one(struct pci_dev *pdev)
767 struct pci_device_id *id;
768 struct mlx4_priv *priv;
769 struct mlx4_dev *dev;
772 #ifdef FORCE_LIVEFISH
777 /* find the type of device */
778 id = mlx4_find_pci_dev(pdev->ven_id, pdev->dev_id);
785 * Check for BARs. We expect 0: 1MB, 2: 8MB, 4: DDR (may not
/* BAR 0 must be exactly 1MB: the device configuration space (DCS). */
788 if (pci_resource_len(pdev, 0) != 1 << 20) {
789 MLX4_PRINT(TRACE_LEVEL_INFORMATION ,MLX4_DBG_LOW ,
790 ("Missing DCS, aborting.\n"));
794 if (!pci_resource_len(pdev, 1)) {
795 MLX4_PRINT(TRACE_LEVEL_INFORMATION ,MLX4_DBG_LOW ,
796 ("Missing UAR, aborting.\n"));
802 /* allocate mlx4_priv structure */
803 priv = kzalloc(sizeof *priv, GFP_KERNEL);
805 MLX4_PRINT(TRACE_LEVEL_INFORMATION ,MLX4_DBG_LOW ,
806 ("Device struct alloc failed, aborting.\n"));
810 /* must be here for livefish */
811 INIT_LIST_HEAD(&priv->ctx_list);
812 spin_lock_init(&priv->ctx_lock);
814 /* deal with livefish, if any */
818 if (id->driver_data == LIVEFISH)
819 dev->flags |= MLX4_FLAG_LIVEFISH;
/* Livefish devices skip normal init entirely: register and bail out so
 * flash tools can access the device. */
820 if (mlx4_is_livefish(dev)) {
821 err = mlx4_register_device(dev);
823 MLX4_PRINT(TRACE_LEVEL_INFORMATION ,MLX4_DBG_LOW ,
824 ("mlx4_register_device for livefish failed, trying to proceed.\n"));
829 * Now reset the HCA before we touch the PCI capabilities or
830 * attempt a firmware command, since a boot ROM may have left
831 * the HCA in an undefined state.
833 err = mlx4_reset(dev);
835 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
839 if (mlx4_cmd_init(dev)) {
840 mlx4_err(dev, "Failed to init command interface, aborting.\n");
844 err = mlx4_init_hca(dev);
848 mlx4_enable_msi_x(dev);
850 err = mlx4_setup_hca(dev);
/* -EBUSY from setup with MSI-X enabled means the NOP interrupt test
 * failed; disable MSI-X and retry once with legacy interrupts. */
851 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
852 #ifdef CONFIG_PCI_MSI
853 dev->flags &= ~MLX4_FLAG_MSI_X;
854 pci_disable_msix(pdev);
856 err = mlx4_setup_hca(dev);
862 err = mlx4_register_device(dev);
866 mlx4_dbg(dev, "MLX4_BUS: NET device (dev_id=%d) is INITIALIZED ! \n", (int)pdev->dev_id);
/* Error unwind: mirror of mlx4_setup_hca teardown, then MSI-X,
 * command interface, and HCA shutdown. */
870 mlx4_cleanup_mcg_table(dev);
871 mlx4_cleanup_qp_table(dev);
872 mlx4_cleanup_srq_table(dev);
873 mlx4_cleanup_cq_table(dev);
874 mlx4_cmd_use_polling(dev);
875 mlx4_cleanup_eq_table(dev);
876 mlx4_cleanup_mr_table(dev);
877 mlx4_cleanup_pd_table(dev);
878 mlx4_cleanup_uar_table(dev);
881 #ifdef CONFIG_PCI_MSI
882 if (dev->flags & MLX4_FLAG_MSI_X)
883 pci_disable_msix(pdev);
889 mlx4_cmd_cleanup(dev);
895 /* we failed device initialization - try to simulate "livefish" device to facilitate using FW burning tools */
896 pdev->dev_id = DEVID_HERMON_BD;
897 id = mlx4_find_pci_dev(pdev->ven_id, pdev->dev_id);
902 goto run_as_livefish;
/*
 * Per-device teardown (called on device removal): unregister from the
 * upper layers, short-circuit for livefish devices, close all ports,
 * then tear everything down in strict reverse order of mlx4_setup_hca()
 * / mlx4_init_one(): resource tables, doorbell mapping, UARs, command
 * interface, and MSI-X.
 */
908 void mlx4_remove_one(struct pci_dev *pdev)
910 struct mlx4_dev *dev = pdev->dev;
911 struct mlx4_priv *priv = mlx4_priv(dev);
915 mlx4_unregister_device(dev);
/* Livefish devices never completed init — nothing more to free. */
916 if (mlx4_is_livefish(dev))
919 for (p = 1; p <= dev->caps.num_ports; ++p)
920 mlx4_CLOSE_PORT(dev, p);
922 mlx4_cleanup_mcg_table(dev);
923 mlx4_cleanup_qp_table(dev);
924 mlx4_cleanup_srq_table(dev);
925 mlx4_cleanup_cq_table(dev);
926 mlx4_cmd_use_polling(dev);
927 mlx4_cleanup_eq_table(dev);
928 mlx4_cleanup_mr_table(dev);
929 mlx4_cleanup_pd_table(dev);
931 iounmap(priv->kar,PAGE_SIZE);
932 mlx4_uar_free(dev, &priv->driver_uar);
933 mlx4_cleanup_uar_table(dev);
935 mlx4_cmd_cleanup(dev);
937 #ifdef CONFIG_PCI_MSI
938 if (dev->flags & MLX4_FLAG_MSI_X)
939 pci_disable_msix(pdev);
942 mlx4_dbg(dev, "MLX4_BUS: NET device (dev_id=%d) is REMOVED ! \n", (int)pdev->dev_id);
/*
 * Restart a device by performing a full remove followed by a full
 * re-initialization.  Returns mlx4_init_one()'s result.
 */
949 int mlx4_restart_one(struct pci_dev *pdev)
951 mlx4_remove_one(pdev);
952 return mlx4_init_one(pdev);