iounmap(priv->clr_base, MLX4_CLR_INT_SIZE);
}
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
- int ret = -ENOMEM;
-
- /*
- * We assume that mapping one page is enough for the whole EQ
- * context table. This is fine with all current HCAs, because
- * we only use 32 EQs and each EQ uses 64 bytes of context
- * memory, or 1 KB total.
- */
- priv->eq_table.icm_virt = icm_virt;
- priv->eq_table.icm_page = alloc_page(dev->pdev, GFP_HIGHUSER);
- if (!priv->eq_table.icm_page.da)
- goto err_out;
-
- priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(priv->eq_table.icm_dma))
- goto err_dma_map;
-
- ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma.da, icm_virt);
- if (ret)
- goto err_icm_map;
- mlx4_dbg(dev,"mlx4_MAP_ICM_page: dma %#I64x, icm_virt %#I64x\n", priv->eq_table.icm_dma.da, icm_virt);
-
- return 0;
-
-err_icm_map:
- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-
-err_dma_map:
- __free_page(dev->pdev, priv->eq_table.icm_page);
-
-err_out:
- return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
- struct mlx4_priv *priv = mlx4_priv(dev);
-
- mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
- pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(dev->pdev, priv->eq_table.icm_page);
-}
-
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
u8 __iomem *uar_map[(MLX4_NUM_EQS + 6) / 4];
u32 clr_mask;
struct mlx4_eq eq[MLX4_NUM_EQS];
- u64 icm_virt;
- dma_addr_t icm_page;
- dma_addr_t icm_dma;
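+ /* single ICM table backing the whole EQ context area, replacing
+ * the one-page icm_virt/icm_page/icm_dma mapping above */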
+ struct mlx4_icm_table table;
struct mlx4_icm_table cmpt_table;
int have_irq;
u8 inta_pin;
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
-
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
u64 total_size = 0;
struct mlx4_resource *profile;
struct mlx4_resource tmp;
- int i, j;
+ int i, j, max_eqs;
profile = kzalloc(MLX4_RES_NUM * sizeof *profile, GFP_KERNEL);
if (!profile)
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
- profile[MLX4_RES_EQ].num = MLX4_NUM_EQS + dev_cap->reserved_eqs;
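+ /*
+ * Size the EQ profile for one EQ per possible CPU plus one,
+ * but never fewer than MLX4_NUM_EQS; add the firmware-reserved
+ * EQs and cap the total at the number of EQs the HCA supports.
+ */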
+ max_eqs = max(num_possible_cpus() + 1, MLX4_NUM_EQS);
+ profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs,
+ max_eqs + dev_cap->reserved_eqs);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt;