[MLX4] added new Registry parameter for number of FC_EXCHs and added a sanity check...
[mirror/winof/.git] / hw / mlx4 / kernel / bus / ib / main.c
1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include "mlx4_ib.h"
34 #include "ib_smi.h"
35 #include "driver.h"
36 #include "cmd.h"
37 #include "user.h"
38 #include "ib_cache.h"
39 #include "net\mlx4.h"
40
41 #if     1//WORKAROUND_POLL_EQ
42 void mlx4_poll_eq(struct ib_device *dev, BOOLEAN bStart);
43 #endif
44
45
46 static void init_query_mad(struct ib_smp *mad)
47 {
48         mad->base_version  = 1;
49         mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
50         mad->class_version = 1;
51         mad->method        = IB_MGMT_METHOD_GET;
52 }
53
/*
 * mlx4_ib_query_device - report HCA-wide capabilities to the IB core.
 *
 * Fetches the NodeInfo attribute via a locally-issued MAD and combines
 * it with the firmware capability cache (dev->dev->caps) to fill @props.
 *
 * Returns 0 on success, -EFAULT if the device is barred (fatal/reset
 * state), -ENOMEM on allocation failure, or the MAD_IFC error code.
 */
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	/* No HW access while the device is barred. */
	if (mlx4_is_barred(ibdev->dma_device))
		return -EFAULT;
	
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	/* Baseline capability flags; the rest depend on FW feature bits. */
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_IPOIB_CSUM;
	if (dev->dev->caps.max_gso_sz)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	/* Fields below are parsed from the NodeInfo MAD payload at fixed
	 * byte offsets. */
	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	/* Advertise only the resources not reserved for driver/FW use. */
	props->max_qp		   = dev->dev->caps.num_qps - dev->dev->caps.total_reserved_qps;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;

	props->max_sge		   = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq		   = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->local_ca_ack_delay  = (u8)dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NON;
	/* NOTE(review): uses port 1's pkey table length for the whole
	 * device — assumes both ports are configured identically; confirm. */
	props->max_pkeys	   = (u16)dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
	/* kfree(NULL) is a no-op, so partial-allocation failures are safe. */
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
135
/*
 * mlx4_ib_query_port - report per-port attributes to the IB core.
 *
 * Issues a PortInfo SMP MAD for @port and decodes the payload at fixed
 * byte offsets, supplementing it with cached firmware capabilities.
 *
 * Returns 0 on success, -EFAULT if the device is barred, -ENOMEM on
 * allocation failure, or the MAD_IFC error code.
 */
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	if (mlx4_is_barred(ibdev->dma_device))
		return -EFAULT;
	
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	/* Decode the PortInfo attribute payload (fixed byte offsets). */
	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	/* Table lengths come from the FW capability cache, not the MAD. */
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= (u16)to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
187
/*
 * mlx4_ib_query_gid_chunk - fetch up to 8 GIDs starting at @index.
 *
 * Two MADs are issued: PortInfo (for the subnet prefix at payload
 * offset 8) and GuidInfo (for a chunk of 8 interface-id GUIDs).  Each
 * returned GID is the subnet prefix combined with one GUID.
 *
 * NOTE(review): the copy loop reads guid[0..size-1], i.e. it assumes
 * @index is 8-aligned and @size <= 8 — confirm against callers.
 *
 * Returns 0 on success, -EFAULT if the device is barred, -ENOMEM on
 * allocation failure, or the MAD_IFC error code.
 */
static int mlx4_ib_query_gid_chunk(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid gid[8], int size)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	__be64	subnet_prefix;
	int err = -ENOMEM;

	if (mlx4_is_barred(ibdev->dma_device))
		return -EFAULT;
	
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	/* Subnet prefix lives at offset 8 of the PortInfo payload. */
	memcpy(&subnet_prefix, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);	/* GuidInfo block number */

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	{ // copy the results
		int i;
		__be64 *guid = (__be64 *)out_mad->data;
		for (i=0; i<size; ++i) {
			gid[i].global.subnet_prefix = subnet_prefix;
			gid[i].global.interface_id = guid[i];
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
236
/*
 * mlx4_ib_query_pkey_chunk - fetch up to 32 P_Keys starting at @index.
 *
 * Issues a PKeyTable SMP MAD for the 32-entry block containing @index
 * and copies @size entries into @pkey (still big-endian).
 *
 * NOTE(review): the copy starts at pkey_chunk[0], i.e. it assumes
 * @index is 32-aligned and @size <= 32 — confirm against callers.
 *
 * Returns 0 on success, -EFAULT if the device is barred, -ENOMEM on
 * allocation failure, or the MAD_IFC error code.
 */
static int mlx4_ib_query_pkey_chunk(struct ib_device *ibdev, u8 port, u16 index,
			     __be16 pkey[32], int size)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	if (mlx4_is_barred(ibdev->dma_device))
		return -EFAULT;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);	/* 32-entry block number */

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	{ // copy the results
		int i;
		__be16 *pkey_chunk = (__be16 *)out_mad->data;
		for (i=0; i<size; ++i) 
			pkey[i] = pkey_chunk[i];
	}

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
272
273 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
274                                  struct ib_device_modify *props)
275 {
276         if (mlx4_is_barred(ibdev->dma_device))
277                 return -EFAULT;
278
279         if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
280                 return -EOPNOTSUPP;
281
282         if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
283                 spin_lock(&to_mdev(ibdev)->sm_lock);
284                 memcpy(ibdev->node_desc, props->node_desc, 64);
285                 spin_unlock(&to_mdev(ibdev)->sm_lock);
286         }
287
288         return 0;
289 }
290 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
291                                struct ib_port_modify *props)
292 {
293         struct ib_port_attr attr;
294         u32 cap_mask;
295         int err;
296
297         if (mlx4_is_barred(ibdev->dma_device))
298                 return -EFAULT;
299
300         mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
301
302         err = mlx4_ib_query_port(ibdev, port, &attr);
303         if (err)
304                 goto out;
305
306         cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
307                 ~props->clr_port_cap_mask;
308
309         err = mlx4_SET_PORT(to_mdev(ibdev)->dev, port,
310                             !!(mask & IB_PORT_RESET_QKEY_CNTR),
311                             cap_mask);
312
313 out:
314         mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
315         return err;
316 }
317
318 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
319                                                   struct ib_udata *udata)
320 {
321         struct mlx4_ib_dev *dev = to_mdev(ibdev);
322         struct mlx4_ib_ucontext *context;
323         struct mlx4_ib_alloc_ucontext_resp resp;
324         int err;
325
326         if (mlx4_is_barred(ibdev->dma_device))
327                 return ERR_PTR(-EFAULT);
328
329         resp.qp_tab_size      = dev->dev->caps.num_qps;
330         resp.bf_reg_size      = (__u16)dev->dev->caps.bf_reg_size;
331         resp.bf_regs_per_page = (__u16)dev->dev->caps.bf_regs_per_page;
332
333         context = kzalloc(sizeof *context, GFP_KERNEL);
334         if (!context)
335                 return ERR_PTR(-ENOMEM);
336
337         if (mlx4_is_livefish(to_mdev(ibdev)->dev))
338                 goto done;
339         
340         err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
341         if (err) {
342                 kfree(context);
343                 return ERR_PTR(err);
344         }
345
346         INIT_LIST_HEAD(&context->db_page_list);
347         mutex_init(&context->db_page_mutex);
348
349 done:
350         err = ib_copy_to_udata(udata, &resp, sizeof resp);
351         if (err) {
352                 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
353                 kfree(context);
354                 return ERR_PTR(-EFAULT);
355         }
356
357         return &context->ibucontext;
358 }
359
360 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
361 {
362         struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
363
364         if (!mlx4_is_livefish(to_mdev(ibcontext->device)->dev))
365                 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
366         kfree(context);
367
368         return 0;
369 }
370
#if 0
	// TODO: not clear, what is the usage 
/*
 * Compiled out (Linux-origin code kept for reference): would map the
 * context's UAR page (pgoff 0) or its BlueFlame page (pgoff 1) into a
 * user process, one page at a time, as non-cached memory.
 */
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	/* Exactly one page may be mapped per call. */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* FIXME want pgprot_writecombine() for BlueFlame pages */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
#endif	
402
403 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
404                                       struct ib_ucontext *context,
405                                       struct ib_udata *udata)
406 {
407         struct mlx4_ib_pd *pd;
408         int err;
409
410         if (mlx4_is_barred(ibdev->dma_device))
411                 return ERR_PTR(-EFAULT);
412
413         pd = kmalloc(sizeof *pd, GFP_KERNEL);
414         if (!pd)
415                 return ERR_PTR(-ENOMEM);
416
417         if (mlx4_is_livefish(to_mdev(ibdev)->dev))
418                 goto done;
419
420         err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
421         if (err) {
422                 kfree(pd);
423                 return ERR_PTR(err);
424         }
425
426         if (context)
427                 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
428                         mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
429                         kfree(pd);
430                         return ERR_PTR(-EFAULT);
431                 }
432
433 done:
434         return &pd->ibpd;
435 }
436
437 static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
438 {
439         if (!mlx4_is_livefish(to_mdev(pd->device)->dev))
440                 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
441         kfree(pd);
442
443         return 0;
444 }
445
446 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
447 {
448         UNUSED_PARAM(lid);
449         if (mlx4_is_barred(ibqp->device->dma_device))
450                 return -EFAULT;
451         return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
452                                      &to_mqp(ibqp)->mqp, gid->raw);
453 }
454
455 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
456 {
457         UNUSED_PARAM(lid);
458         if (mlx4_is_barred(ibqp->device->dma_device))
459                 return -EFAULT;
460         return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
461                                      &to_mqp(ibqp)->mqp, gid->raw);
462 }
463
/*
 * init_node_data - populate node description, revision and node GUID.
 *
 * Issues a NodeDesc MAD (node description + hardware revision at
 * payload offset 32) followed by a NodeInfo MAD (node GUID at payload
 * offset 12), storing the results in the ib_device.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * MAD_IFC error code.
 */
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	/* Re-use the same MAD header for the NodeInfo query. */
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
498
/*
 * mlx4_ib_add - bus-driver "add" callback: create and register the
 * IB device for a newly-probed mlx4 HCA.
 *
 * Fills in the ib_device verb table, then (outside livefish mode)
 * allocates the driver's private PD and UAR, maps the UAR doorbell
 * page, queries node identity and registers with the IB core.
 * In livefish (flash-recovery) mode only the registration is done.
 *
 * Returns the new mlx4_ib_dev, or NULL on failure (all partially
 * acquired resources are unwound via the goto chain at the bottom).
 */
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	INIT_LIST_HEAD(&ibdev->pgdir_list);
	mutex_init(&ibdev->pgdir_mutex);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.phys_port_cnt	= (u8)mlx4_count_ib_ports(dev);
	ibdev->ib_dev.num_comp_vectors	= 1;
	ibdev->ib_dev.dma_device	= dev->pdev->dev;

	/* Wire up the verbs dispatch table. */
	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.query_gid_chunk	= mlx4_ib_query_gid_chunk;
	ibdev->ib_dev.query_pkey_chunk	= mlx4_ib_query_pkey_chunk;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= NULL;		/* mlx4_ib_mmap; */
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.modify_ah		= mlx4_ib_modify_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
	/* WinOF extension callbacks (cached SA queries, reset events). */
	ibdev->ib_dev.x.find_cached_gid		= ib_find_cached_gid;
	ibdev->ib_dev.x.find_cached_pkey	= ib_find_cached_pkey;
	ibdev->ib_dev.x.get_cached_gid		= ib_get_cached_gid;
	ibdev->ib_dev.x.get_cached_pkey		= ib_get_cached_pkey;
	ibdev->ib_dev.x.register_ev_cb		= mlx4_reset_cb_register;
	ibdev->ib_dev.x.unregister_ev_cb	= mlx4_reset_cb_unregister;
#if 1//WORKAROUND_POLL_EQ
	ibdev->ib_dev.x.poll_eq				= mlx4_poll_eq;
#endif
	/* Livefish mode: register only, no HW resources to set up. */
	if (mlx4_is_livefish(ibdev->dev)) {
		if (ib_register_device(&ibdev->ib_dev))
			goto err_dealloc;
		return ibdev;
	}

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	/* Map the driver's private UAR doorbell page. */
	ibdev->uar_map = ioremap((u64)ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;

	if (init_node_data(ibdev))
		goto err_map;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev))
		goto err_map;

	mlx4_dbg(ibdev->dev, "MLX4_BUS: IB interface is ADDED ! \n");

	return ibdev;

/* Error unwind: release resources in reverse order of acquisition. */
err_map:
	iounmap(ibdev->uar_map, PAGE_SIZE);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ibdev->ib_dev.reg_state = IB_DEV_UNINITIALIZED;
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
618
619 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
620 {
621         struct mlx4_ib_dev *ibdev = ibdev_ptr;
622         int p;
623
624         if (mlx4_is_livefish(ibdev->dev)) {
625                 ib_unregister_device(&ibdev->ib_dev);
626                 goto dealloc_dev;
627         }
628
629         ib_unregister_device(&ibdev->ib_dev);
630         
631         for (p = 1; p <= dev->caps.num_ports; ++p)
632                 mlx4_CLOSE_PORT(dev, p);
633
634         iounmap(ibdev->uar_map,PAGE_SIZE);
635         mlx4_uar_free(dev, &ibdev->priv_uar);
636         mlx4_pd_free(dev, ibdev->priv_pdn);
637 dealloc_dev:    
638         mlx4_dbg(ibdev->dev, "MLX4_BUS: IB interface is REMOVED ! \n");
639         ibdev->ib_dev.reg_state = IB_DEV_UNINITIALIZED;
640         ib_dealloc_device(&ibdev->ib_dev);
641 }
642
643 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
644                           enum mlx4_dev_event event, int subtype,
645                           int port)
646 {
647         struct ib_event ibev;
648
649         UNUSED_PARAM(dev);
650
651         switch (event) {
652         case MLX4_EVENT_TYPE_PORT_CHANGE:
653                 ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
654                         IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
655                 break;
656
657         case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR:
658                 ibev.event = IB_EVENT_DEVICE_FATAL;
659                 break;
660
661         default:
662                 return;
663         }
664
665         ibev.device           = ibdev_ptr;
666         ibev.element.port_num = (u8)port;
667
668         ib_dispatch_event(&ibev);
669 }
670
/* Callback set registered with the mlx4 bus driver.  Positional
 * initializer order: add, remove, event, then the two list links
 * (filled in by mlx4_register_interface). */
static struct mlx4_interface mlx4_ib_interface = {
	mlx4_ib_add,		/* add */
	mlx4_ib_remove,		/* remove */
	mlx4_ib_event,		/* event */
	NULL, NULL			/* list */
};
677
/*
 * mlx4_ib_init - module entry: initialize QP bookkeeping, then
 * register this IB interface with the mlx4 bus driver.
 * Returns the mlx4_register_interface() status.
 */
int __init mlx4_ib_init(void)
{
	mlx4_ib_qp_init();
	return mlx4_register_interface(&mlx4_ib_interface);
}
683
/* Module exit: detach this IB interface from the mlx4 bus driver. */
void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
}
688
689
690