int p;
int port_num;
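+ /* set up the work-thread shutter and the async event handler before allocating
+    the caches, so the error path below can always tear both down */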
+ shutter_init( &device->cache.x.work_thread );
rwlock_init(&device->cache.lock);
+ INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
+ device, ib_cache_event, NULL, NULL, 0);
+ ib_register_event_handler(&device->cache.event_handler);
+
port_num = end_port(device) - start_port(device) + 1;
-
if (port_num > 0 ) {
// port_num == 0 means the device has no IB ports
device->cache.pkey_cache =
kmalloc(sizeof *device->cache.pkey_cache * port_num, GFP_KERNEL);
device->cache.gid_cache =
kmalloc(sizeof *device->cache.gid_cache * port_num, GFP_KERNEL);
-
device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
- port_num, GFP_KERNEL);
+ port_num, GFP_KERNEL);
if (!device->cache.pkey_cache || !device->cache.gid_cache ||
- !device->cache.lmc_cache) {
+ !device->cache.lmc_cache) {
printk(KERN_WARNING "Couldn't allocate cache "
- "for %s\n", device->name);
+ "for %s\n", device->name);
goto err;
}
}
- shutter_init( &device->cache.x.work_thread );
-
for (p = 0; p < port_num; ++p) {
device->cache.pkey_cache[p] = NULL;
device->cache.gid_cache [p] = NULL;
ib_cache_update(device, (u8)(p + start_port(device)));
}
- INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
- device, ib_cache_event, NULL, NULL, 0);
- if (ib_register_event_handler(&device->cache.event_handler))
- goto err_cache;
-
return;
-err_cache:
- for (p = 0; p <= end_port(device) - start_port(device); ++p) {
- kfree(device->cache.pkey_cache[p]);
- kfree(device->cache.gid_cache[p]);
- }
-
err:
kfree(device->cache.pkey_cache);
kfree(device->cache.gid_cache);
{
int p;
+ ASSERT(device->cache.event_handler.device);
ib_unregister_event_handler(&device->cache.event_handler);
// instead of Linux flush_scheduled_work(): wait for the work threads to finish
shutter_shut( &device->cache.x.work_thread );
goto out;
}
- list_add_tail(&device->core_list, &device_list);
-
- device->reg_state = IB_DEV_REGISTERED;
-
{
struct ib_client *client;
- list_for_each_entry(client, &client_list, list, struct ib_client)
- if (client->add && !add_client_context(device, client))
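+ /* give every registered client a context on this device; abort on the first failure */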
+ list_for_each_entry(client, &client_list, list, struct ib_client) {
+ if ( add_client_context(device, client) ) {
+ printk(KERN_WARNING "add_client_context failed for device %s\n",
+ device->name);
+ ret = -EFAULT;
+ goto out;
+ }
+ if (client->add)
client->add(device);
+ }
}
+
+ list_add_tail(&device->core_list, &device_list);
+ device->reg_state = IB_DEV_REGISTERED;
out:
mutex_unlock(&device_mutex);
int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
+ int ret = 0;
mutex_lock(&device_mutex);
- list_add_tail(&client->list, &client_list);
- list_for_each_entry(device, &device_list, core_list, struct ib_device)
- if (client->add && !add_client_context(device, client))
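+ /* attach the client to every existing device before exposing it on client_list */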
+ list_for_each_entry(device, &device_list, core_list, struct ib_device) {
+ if ( add_client_context(device, client) ) {
+ printk(KERN_WARNING "add_client_context failed for device %s\n",
+ device->name);
+ ret = -EFAULT;
+ goto out;
+ }
+ if (client->add)
client->add(device);
-
+ }
+
+ list_add_tail(&client->list, &client_list);
+out:
mutex_unlock(&device_mutex);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ib_register_client);
\r
p_fdo->bus_ib_ifc.pdev = &p_fdo->pci_dev;\r
p_fdo->bus_ib_ifc.p_ibdev = p_fdo->pci_dev.ib_dev;\r
- p_fdo->bus_ib_ifc.pmlx4_dev = to_mdev(p_fdo->pci_dev.ib_dev)->dev;\r
- p_fdo->bus_ib_ifc.is_livefish = mlx4_is_livefish(p_fdo->pci_dev.dev);\r
- if ( p_fdo->bus_ib_ifc.pmlx4_dev->flags & MLX4_FLAG_MSI_X )\r
- p_fdo->bus_ib_ifc.n_msi_vectors = p_fdo->pci_dev.n_msi_vectors - 2;\r
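+ /* in "livefish" (firmware-recovery) mode no mlx4 device is opened, so there is nothing to query */\r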
+ p_fdo->bus_ib_ifc.is_livefish = mlx4_is_livefish(p_fdo->pci_dev.dev);\r
+ if ( p_fdo->bus_ib_ifc.is_livefish == 0 ) {\r
+ p_fdo->bus_ib_ifc.pmlx4_dev = to_mdev(p_fdo->pci_dev.ib_dev)->dev;\r
+ if ( p_fdo->bus_ib_ifc.pmlx4_dev->flags & MLX4_FLAG_MSI_X )\r
+ p_fdo->bus_ib_ifc.n_msi_vectors = p_fdo->pci_dev.n_msi_vectors - 2;\r
+ }\r
\r
p_fdo->card_started = TRUE;\r
\r
pdev->int_info = *desc;\r
if (desc->Flags & CM_RESOURCE_INTERRUPT_MESSAGE) {\r
pdev->n_msi_vectors_alloc = (u8)(pdev->n_msi_vectors_alloc+desc_raw->u.MessageInterrupt.Raw.MessageCount);\r
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,\r
+ MLX4_PRINT(TRACE_LEVEL_VERBOSE, MLX4_DBG_DRV,\r
("EvtPrepareHardware: Desc %d: MsiInterrupt: Share %d, Flags %#x, Level %d, Vector %#x, Affinity %#x\n", \r
i, desc->ShareDisposition, desc->Flags,\r
desc->u.MessageInterrupt.Translated.Level, \r
desc->u.MessageInterrupt.Translated.Vector, \r
(u32)desc->u.MessageInterrupt.Translated.Affinity ));\r
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,\r
+ MLX4_PRINT(TRACE_LEVEL_VERBOSE, MLX4_DBG_DRV,\r
("EvtPrepareHardware: Desc %d: RawMsiInterrupt: Share %d, Flags %#x, MessageCount %#hx, Vector %#x, Affinity %#x\n", \r
i, desc_raw->ShareDisposition, desc_raw->Flags,\r
desc_raw->u.MessageInterrupt.Raw.MessageCount, \r
(u32)desc_raw->u.MessageInterrupt.Raw.Affinity ));\r
}\r
else { // line-based interrupt\r
- MLX4_PRINT(TRACE_LEVEL_WARNING, MLX4_DBG_DRV,\r
+ MLX4_PRINT(TRACE_LEVEL_VERBOSE, MLX4_DBG_DRV,\r
("EvtPrepareHardware: Desc %d: LineInterrupt: Share %d, Flags %#x, Level %d, Vector %#x, Affinity %#x\n", \r
i, desc->ShareDisposition, desc->Flags,\r
desc->u.Interrupt.Level, desc->u.Interrupt.Vector, \r
(u32)desc->u.Interrupt.Affinity ));\r
}\r
\r
NTSTATUS\r
-EvtDeviceAdd(\r
+EvtDriverDeviceAdd(\r
IN WDFDRIVER Driver,\r
IN PWDFDEVICE_INIT DeviceInit\r
)\r
/*++\r
Routine Description:\r
\r
- EvtDeviceAdd is called by the framework in response to AddDevice\r
+ EvtDriverDeviceAdd is called by the framework in response to AddDevice\r
call from the PnP manager. We create and initialize a device object to\r
represent a new instance of the mxe bus.\r
\r
//\r
\r
WDF_DRIVER_CONFIG_INIT(\r
- &config, EvtDeviceAdd );\r
+ &config, EvtDriverDeviceAdd );\r
config.EvtDriverUnload = EvtDriverUnload;\r
\r
//\r
);\r
\r
NTSTATUS\r
-EvtDeviceAdd(\r
+EvtDriverDeviceAdd(\r
IN WDFDRIVER Driver,\r
IN PWDFDEVICE_INIT DeviceInit\r
);\r
p_vector = ka;\r
/* print (allocated+2) vectors */\r
for (i=0; i<pdev->n_msi_vectors_alloc+2; i++) {\r
- MLX4_PRINT( TRACE_LEVEL_WARNING ,MLX4_DBG_PNP ,\r
+ MLX4_PRINT( TRACE_LEVEL_VERBOSE ,MLX4_DBG_PNP ,\r
("MSI-X Vectors: Id %d, Masked %d, Addr %#I64x, Data %#x\n",\r
i, MSIX_VECTOR_MASKED(p_vector[i].Flags),\r
p_vector[i].Addr, p_vector[i].Data ));\r
)\r
{\r
u32 sem;\r
- NTSTATUS status = STATUS_SUCCESS;\r
+ NTSTATUS status = STATUS_SUCCESS, status1;\r
PBUS_INTERFACE_STANDARD p_ifc = &pdev->bus_pci_ifc;\r
PCI_COMMON_CONFIG* p_cfg = &pdev->pci_cfg_space;\r
struct msix_saved_info msix_info;\r
}\r
}\r
\r
+ status = STATUS_SUCCESS;\r
+\r
+err:\r
/* restore MSI-X info after reset */\r
- status = __pci_restore_msix_info( pdev, &msix_info );\r
- if (!NT_SUCCESS(status))\r
- goto err;\r
+ status1 = __pci_restore_msix_info( pdev, &msix_info );\r
+ status = (!status) ? status1 : status; /* keep the first error; if none, report the restore status */\r
+ if( NT_SUCCESS( status ) ) {\r
+ MLX4_PRINT( TRACE_LEVEL_WARNING ,MLX4_DBG_PNP , ("HCA has been reset!\n"));\r
+ }\r
\r
- /* check, whether MSI-X capabilities were restore */\r
+ /* check whether the MSI-X capabilities have been restored */\r
pci_get_msi_info( pdev, p_cfg, &pdev->uplink_info );\r
\r
- MLX4_PRINT( TRACE_LEVEL_WARNING ,MLX4_DBG_PNP , ("HCA has been reset ! \n"));\r
-\r
- status = STATUS_SUCCESS;\r
-\r
-err:\r
if (pdev->msix_info.valid) \r
pci_free_msix_info_resources(&pdev->msix_info);\r
MLX4_EXIT( MLX4_DBG_PNP );\r
// allow operations that are in progress to complete
reset_work = IoAllocateWorkItem( dev->pdev->p_self_do );
if (!reset_work) {
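+ /* drop ibdev->event_handler_lock before the error exit */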
+ spin_unlock_irqrestore(&ibdev->event_handler_lock, flags);
mlx4_err(dev, "mlx4_reset_request IoAllocateWorkItem failed, reset will not be propagated\n");
err = -EFAULT;
goto err_workitem;
mlx4_dispatch_reset_event(dev->pdev->ib_dev, IB_EVENT_RESET_DRIVER);
}
}
+ else {
+ err = -EFAULT;
+ mlx4_err(dev, "mlx4_cmd_wait: Unexpected end of waiting for a command\n");
+ ASSERT(0);
+ }
}
-
- err = context->result;
+ else
+ err = context->result;
+
if (err)
goto out;
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);
-static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
+static int mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
struct mlx4_device_context *dev_ctx;
dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
if (!dev_ctx)
- return;
+ return -EFAULT;
dev_ctx->intf = intf;
dev_ctx->context = intf->add(&priv->dev);
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
spin_unlock_irq(&priv->ctx_lock);
- } else
+ } else {
kfree(dev_ctx);
+ return -EFAULT;
+ }
+ return 0;
}
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
int mlx4_register_interface(struct mlx4_interface *intf)
{
struct mlx4_priv *priv;
+ int err = 0;
if (!intf->add || !intf->remove)
return -EINVAL;
mutex_lock(&intf_mutex);
- list_add_tail(&intf->list, &intf_list);
- list_for_each_entry(priv, &dev_list, dev_list, struct mlx4_priv)
- mlx4_add_device(intf, priv);
+ list_for_each_entry(priv, &dev_list, dev_list, struct mlx4_priv) {
+ if (mlx4_add_device(intf, priv)) {
+ err = -EFAULT;
+ goto end;
+ }
+ }
- mutex_unlock(&intf_mutex);
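+ /* every existing device accepted the interface; now make it visible */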
+ list_add_tail(&intf->list, &intf_list);
- return 0;
+end:
+ mutex_unlock(&intf_mutex);
+ return err;
}
EXPORT_SYMBOL_GPL(mlx4_register_interface);
mutex_lock(&intf_mutex);
+ list_for_each_entry(intf, &intf_list, list, struct mlx4_interface) {
+ if (mlx4_add_device(intf, priv)) {
+ err = -EFAULT;
+ goto end;
+ }
+ }
+
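+ /* all interfaces attached; publish the device on the global list */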
list_add_tail(&priv->dev_list, &dev_list);
- list_for_each_entry(intf, &intf_list, list, struct mlx4_interface)
- mlx4_add_device(intf, priv);
-
+
+end:
mutex_unlock(&intf_mutex);
- if (!mlx4_is_livefish(dev))
+ if (!err && !mlx4_is_livefish(dev))
err = mlx4_start_catas_poll(dev);
return err;
("mlx4_register_device for livefish failed, return with error.\n"));
pdev->dev = NULL;
kfree(priv);
+ }
+ else {
+ MLX4_PRINT(TRACE_LEVEL_ERROR ,MLX4_DBG_LOW ,
+ ("MLX4_BUS started in \"livefish\" mode !!!.\n"));
}
- MLX4_PRINT(TRACE_LEVEL_ERROR ,MLX4_DBG_LOW ,
- ("MLX4_BUS started in \"livefish\" mode !!!.\n"));
goto end;
}
mlx4_close_hca(dev);
mlx4_cmd_cleanup(dev);
- if (reset)
- mlx4_reset(dev);
+ if (reset && mlx4_reset(dev))
+ mlx4_err(dev, "Failed to reset HCA\n");
mlx4_dbg(dev, "MLX4_BUS: NET device (dev_id=%d) is REMOVED ! \n", (int)pdev->dev_id);
pdev->dev = NULL;
done:
\r
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM,\r
("context 0x%p\n", ca_context));\r
- status = mlnx_set_cb(p_hca,\r
- pfn_async_event_cb,\r
- ca_context);\r
- if (IB_SUCCESS != status) {\r
- goto err_set_cb;\r
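+ /* the async event callback is optional; register it only when one is supplied */\r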
+ if (pfn_async_event_cb) {\r
+ status = mlnx_set_cb(p_hca,\r
+ pfn_async_event_cb,\r
+ ca_context);\r
+ if (IB_SUCCESS != status) {\r
+ goto err_set_cb;\r
+ }\r
}\r
\r
\r
!!mthca_is_livefish(p_ext->hca.mdev),\r
pIfc );\r
\r
- pIfc->p_hca_dev = p_ext->cl_ext.p_pdo;\r
+ pIfc->p_hca_obj = &p_ext->hca.hob;\r
pIfc->vend_id = (uint32_t)p_ext->hcaConfig.VendorID;\r
pIfc->dev_id = (uint16_t)p_ext->hcaConfig.DeviceID;\r
pIfc->dev_revision = (uint16_t)p_ext->hca.hw_ver;\r
p_ifc->InterfaceHeader.InterfaceReference = __ref_ifc;\r
p_ifc->InterfaceHeader.InterfaceDereference = __deref_ifc;\r
p_ifc->Verbs = *p_hca_ifc;\r
- p_ifc->p_hca_obj = &p_ext->hca.hob;\r
\r
/* take the reference before returning. */\r
__ref_ifc( p_dev_obj );\r
\r
HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM,\r
("context 0x%p\n", ca_context));\r
- status = mlnx_hobs_set_cb(&p_hca->hob,\r
- pfn_async_event_cb,\r
- ca_context);\r
- if (IB_SUCCESS != status) {\r
- goto err_set_cb;\r
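+ /* register the async event callback only if the caller supplied one */\r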
+ if (pfn_async_event_cb) {\r
+ status = mlnx_hobs_set_cb(&p_hca->hob,\r
+ pfn_async_event_cb,\r
+ ca_context);\r
+ if (IB_SUCCESS != status) {\r
+ goto err_set_cb;\r
+ }\r
}\r
-\r
\r
//TODO: do we need something for kernel users ?\r
\r
u8 p;
rwlock_init(&device->cache.lock);
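+ /* listen for the port events (LID/PKEY/GID changes) that require a cache refresh */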
+ INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
+ device, ib_cache_event);
+ ib_register_event_handler(&device->cache.event_handler);
device->cache.pkey_cache =
kmalloc(sizeof *device->cache.pkey_cache *
ib_cache_update(device, p + start_port(device));
}
- INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
- device, ib_cache_event);
- if (ib_register_event_handler(&device->cache.event_handler))
- goto err_cache;
-
return;
-err_cache:
- for (p = 0; p <= end_port(device) - start_port(device); ++p) {
- kfree(device->cache.pkey_cache[p]);
- kfree(device->cache.gid_cache[p]);
- }
-
err:
kfree(device->cache.pkey_cache);
kfree(device->cache.gid_cache);