[MLX4] cleanup.
[mirror/winof/.git] / hw / mlx4 / kernel / bus / ib / main.c
1 /*
2  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include "mlx4_ib.h"
34 #include "ib_smi.h"
35 #include "driver.h"
36 #include "cmd.h"
37 #include "user.h"
38 #include "ib_cache.h"
39 #include "net\mlx4.h"
40
41 #if     1//WORKAROUND_POLL_EQ
42 void mlx4_poll_eq(struct ib_device *dev, BOOLEAN bStart);
43 #endif
44
45
46 static void init_query_mad(struct ib_smp *mad)
47 {
48         mad->base_version  = 1;
49         mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
50         mad->class_version = 1;
51         mad->method        = IB_MGMT_METHOD_GET;
52 }
53
/*
 * Report device-wide HCA attributes to the IB core.
 *
 * Sends a NodeInfo MAD through the firmware interface to fetch the
 * vendor/part/hw-revision IDs and the system image GUID, then fills the
 * remaining limits from the cached firmware capabilities (dev->dev->caps).
 *
 * Returns 0 on success, -EFAULT if the device is barred, -ENOMEM on
 * allocation failure, or the error from mlx4_MAD_IFC().
 */
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	/* Refuse to touch a device that has been barred (e.g. after a reset). */
	if (mlx4_is_barred(ibdev->dma_device))
		return -EFAULT;
	
	/* in_mad must start zeroed; out_mad is fully overwritten by the FW. */
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN;
	/* Translate optional FW capability flags into IB core cap bits. */
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_IPOIB_CSUM;
	if (dev->dev->caps.max_gso_sz)
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	/* Byte offsets below index into the NodeInfo response payload. */
	props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
	props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

	props->max_mr_size         = ~0ull;
	props->page_size_cap       = dev->dev->caps.page_size_cap;
	/* QPs reserved for FW and FC exchange are not available to clients. */
	props->max_qp              = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] - 
				 dev->dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
	props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;

	props->max_sge             = min(dev->dev->caps.max_sq_sg,
					 dev->dev->caps.max_rq_sg);
	props->max_cq              = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
	props->max_cqe             = dev->dev->caps.max_cqes;
	props->max_mr              = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
	props->max_pd              = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom      = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
	props->max_srq             = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
	props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge         = dev->dev->caps.max_srq_sge;
	props->local_ca_ack_delay  = (u8)dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NON;
	/* NOTE(review): pkey_table_len is indexed with a fixed port 1 here —
	 * presumably all ports share the same pkey table length; confirm. */
	props->max_pkeys           = (u16)dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
136
/*
 * Report the attributes of one physical port.
 *
 * Sends a PortInfo MAD for @port through the firmware and decodes the
 * response fields; table lengths and max message size come from the
 * cached firmware capabilities instead.
 *
 * Returns 0 on success, -EFAULT if the device is barred, -ENOMEM on
 * allocation failure, or the error from mlx4_MAD_IFC().
 */
static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	if (mlx4_is_barred(ibdev->dma_device))
		return -EFAULT;
	
	/* in_mad must start zeroed; out_mad is fully overwritten by the FW. */
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof *props);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	/* Decode the PortInfo payload; offsets/masks select the packed fields. */
	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= (u16)to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
188
189 static int mlx4_ib_query_gid_chunk(struct ib_device *ibdev, u8 port, int index,
190                              union ib_gid gid[8], int size)
191 {
192         struct ib_smp *in_mad  = NULL;
193         struct ib_smp *out_mad = NULL;
194         __be64  subnet_prefix;
195         int err = -ENOMEM;
196
197         if (mlx4_is_barred(ibdev->dma_device))
198                 return -EFAULT;
199         
200         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
201         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
202         if (!in_mad || !out_mad)
203                 goto out;
204
205         init_query_mad(in_mad);
206         in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
207         in_mad->attr_mod = cpu_to_be32(port);
208
209         err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
210         if (err)
211                 goto out;
212
213         memcpy(&subnet_prefix, out_mad->data + 8, 8);
214
215         init_query_mad(in_mad);
216         in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
217         in_mad->attr_mod = cpu_to_be32(index / 8);
218
219         err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
220         if (err)
221                 goto out;
222
223         { // copy the results
224                 int i;
225                 __be64 *guid = (__be64 *)out_mad->data;
226                 for (i=0; i<size; ++i) {
227                         gid[i].global.subnet_prefix = subnet_prefix;
228                         gid[i].global.interface_id = guid[i];
229                 }
230         }
231
232 out:
233         kfree(in_mad);
234         kfree(out_mad);
235         return err;
236 }
237
238 static int mlx4_ib_query_pkey_chunk(struct ib_device *ibdev, u8 port, u16 index,
239                              __be16 pkey[32], int size)
240 {
241         struct ib_smp *in_mad  = NULL;
242         struct ib_smp *out_mad = NULL;
243         int err = -ENOMEM;
244
245         if (mlx4_is_barred(ibdev->dma_device))
246                 return -EFAULT;
247
248         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
249         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
250         if (!in_mad || !out_mad)
251                 goto out;
252
253         init_query_mad(in_mad);
254         in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
255         in_mad->attr_mod = cpu_to_be32(index / 32);
256
257         err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
258         if (err)
259                 goto out;
260
261         { // copy the results
262                 int i;
263                 __be16 *pkey_chunk = (__be16 *)out_mad->data;
264                 for (i=0; i<size; ++i) 
265                         pkey[i] = pkey_chunk[i];
266         }
267
268 out:
269         kfree(in_mad);
270         kfree(out_mad);
271         return err;
272 }
273
274 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
275                                  struct ib_device_modify *props)
276 {
277         if (mlx4_is_barred(ibdev->dma_device))
278                 return -EFAULT;
279
280         if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
281                 return -EOPNOTSUPP;
282
283         if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
284                 spin_lock(&to_mdev(ibdev)->sm_lock);
285                 memcpy(ibdev->node_desc, props->node_desc, 64);
286                 spin_unlock(&to_mdev(ibdev)->sm_lock);
287         }
288
289         return 0;
290 }
291 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
292                                struct ib_port_modify *props)
293 {
294         struct ib_port_attr attr;
295         u32 cap_mask;
296         int err;
297
298         if (mlx4_is_barred(ibdev->dma_device))
299                 return -EFAULT;
300
301         mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);
302
303         err = mlx4_ib_query_port(ibdev, port, &attr);
304         if (err)
305                 goto out;
306
307         cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
308                 ~props->clr_port_cap_mask;
309
310         err = mlx4_SET_PORT(to_mdev(ibdev)->dev, port,
311                             !!(mask & IB_PORT_RESET_QKEY_CNTR),
312                             cap_mask);
313
314 out:
315         mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
316         return err;
317 }
318
319 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
320                                                   struct ib_udata *udata)
321 {
322         struct mlx4_ib_dev *dev = to_mdev(ibdev);
323         struct mlx4_ib_ucontext *context;
324         struct mlx4_ib_alloc_ucontext_resp resp;
325         int err;
326
327         if (mlx4_is_barred(ibdev->dma_device))
328                 return ERR_PTR(-EFAULT);
329
330         resp.qp_tab_size      = dev->dev->caps.num_qps;
331         resp.bf_reg_size      = (__u16)dev->dev->caps.bf_reg_size;
332         resp.bf_regs_per_page = (__u16)dev->dev->caps.bf_regs_per_page;
333
334         context = kzalloc(sizeof *context, GFP_KERNEL);
335         if (!context)
336                 return ERR_PTR(-ENOMEM);
337
338         if (mlx4_is_livefish(to_mdev(ibdev)->dev))
339                 goto done;
340         
341         err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
342         if (err) {
343                 kfree(context);
344                 return ERR_PTR(err);
345         }
346
347         INIT_LIST_HEAD(&context->db_page_list);
348         mutex_init(&context->db_page_mutex);
349
350 done:
351         err = ib_copy_to_udata(udata, &resp, sizeof resp);
352         if (err) {
353                 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
354                 kfree(context);
355                 return ERR_PTR(-EFAULT);
356         }
357
358         return &context->ibucontext;
359 }
360
361 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
362 {
363         struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
364
365         if (!mlx4_is_livefish(to_mdev(ibcontext->device)->dev))
366                 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
367         kfree(context);
368
369         return 0;
370 }
371
#if 0
	// TODO: not clear, what is the usage 
	/*
	 * Disabled user-space mmap handler, kept for reference.  It would map
	 * either the context's UAR page (vm_pgoff == 0) or its BlueFlame page
	 * (vm_pgoff == 1, when bf_reg_size != 0) into the caller, one
	 * PAGE_SIZE at a time; ib_dev.mmap is registered as NULL instead.
	 */
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	/* Exactly one page may be mapped per call. */
	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
		/* FIXME want pgprot_writecombine() for BlueFlame pages */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_mucontext(context)->uar.pfn +
				       dev->dev->caps.num_uars,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
#endif	
403
404 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
405                                       struct ib_ucontext *context,
406                                       struct ib_udata *udata)
407 {
408         struct mlx4_ib_pd *pd;
409         int err;
410
411         if (mlx4_is_barred(ibdev->dma_device))
412                 return ERR_PTR(-EFAULT);
413
414         pd = kmalloc(sizeof *pd, GFP_KERNEL);
415         if (!pd)
416                 return ERR_PTR(-ENOMEM);
417
418         if (mlx4_is_livefish(to_mdev(ibdev)->dev))
419                 goto done;
420
421         err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
422         if (err) {
423                 kfree(pd);
424                 return ERR_PTR(err);
425         }
426
427         if (context)
428                 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
429                         mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
430                         kfree(pd);
431                         return ERR_PTR(-EFAULT);
432                 }
433
434 done:
435         return &pd->ibpd;
436 }
437
438 static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
439 {
440         if (!mlx4_is_livefish(to_mdev(pd->device)->dev))
441                 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
442         kfree(pd);
443
444         return 0;
445 }
446
447 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
448 {
449         UNUSED_PARAM(lid);
450         if (mlx4_is_barred(ibqp->device->dma_device))
451                 return -EFAULT;
452         return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
453                                      &to_mqp(ibqp)->mqp, gid->raw);
454 }
455
456 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
457 {
458         UNUSED_PARAM(lid);
459         if (mlx4_is_barred(ibqp->device->dma_device))
460                 return -EFAULT;
461         return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
462                                      &to_mqp(ibqp)->mqp, gid->raw);
463 }
464
/*
 * Cache the node identity at device bring-up: the 64-byte node
 * description, the hardware revision id, and the node GUID, obtained via
 * NodeDesc and NodeInfo MADs through the firmware interface.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from mlx4_MAD_IFC().
 */
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	/* in_mad must start zeroed; out_mad is fully overwritten by the FW. */
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	/* rev_id is taken from byte 32 of the response; the description
	 * text occupies the first 64 bytes. */
	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

	/* Reuse the prepared MAD header for a NodeInfo query. */
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	/* The node GUID lives at byte offset 12 of the NodeInfo payload. */
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
499
/*
 * Bus-driver 'add' callback: create and register the IB device for one
 * mlx4 low-level device.
 *
 * Fills in the ib_device verb table, then (unless the device is in
 * livefish/flash-recovery mode) allocates the driver's private PD and
 * UAR, maps the UAR page, caches the node identity and registers with
 * the IB core.  Resources are unwound in reverse order on failure.
 *
 * Returns the new mlx4_ib_dev pointer, or NULL on any failure.
 */
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;

	ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
	if (!ibdev) {
		dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
		return NULL;
	}

	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	/* Doorbell page directory used for user doorbell records. */
	INIT_LIST_HEAD(&ibdev->pgdir_list);
	mutex_init(&ibdev->pgdir_mutex);

	ibdev->dev = dev;

	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.phys_port_cnt	= (u8)mlx4_count_ib_ports(dev);
	ibdev->ib_dev.num_comp_vectors	= 1;
	ibdev->ib_dev.dma_device	= dev->pdev->dev;

	/* Verb dispatch table: one entry per IB operation we implement. */
	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
	ibdev->ib_dev.query_gid_chunk	= mlx4_ib_query_gid_chunk;
	ibdev->ib_dev.query_pkey_chunk	= mlx4_ib_query_pkey_chunk;
	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
	ibdev->ib_dev.modify_port	= mlx4_ib_modify_port;
	ibdev->ib_dev.alloc_ucontext	= mlx4_ib_alloc_ucontext;
	ibdev->ib_dev.dealloc_ucontext	= mlx4_ib_dealloc_ucontext;
	ibdev->ib_dev.mmap		= NULL;		/* mlx4_ib_mmap; */
	ibdev->ib_dev.alloc_pd		= mlx4_ib_alloc_pd;
	ibdev->ib_dev.dealloc_pd	= mlx4_ib_dealloc_pd;
	ibdev->ib_dev.create_ah		= mlx4_ib_create_ah;
	ibdev->ib_dev.query_ah		= mlx4_ib_query_ah;
	ibdev->ib_dev.modify_ah		= mlx4_ib_modify_ah;
	ibdev->ib_dev.destroy_ah	= mlx4_ib_destroy_ah;
	ibdev->ib_dev.create_srq	= mlx4_ib_create_srq;
	ibdev->ib_dev.modify_srq	= mlx4_ib_modify_srq;
	ibdev->ib_dev.query_srq		= mlx4_ib_query_srq;
	ibdev->ib_dev.destroy_srq	= mlx4_ib_destroy_srq;
	ibdev->ib_dev.post_srq_recv	= mlx4_ib_post_srq_recv;
	ibdev->ib_dev.create_qp		= mlx4_ib_create_qp;
	ibdev->ib_dev.modify_qp		= mlx4_ib_modify_qp;
	ibdev->ib_dev.query_qp		= mlx4_ib_query_qp;
	ibdev->ib_dev.destroy_qp	= mlx4_ib_destroy_qp;
	ibdev->ib_dev.post_send		= mlx4_ib_post_send;
	ibdev->ib_dev.post_recv		= mlx4_ib_post_recv;
	ibdev->ib_dev.create_cq		= mlx4_ib_create_cq;
	ibdev->ib_dev.modify_cq		= mlx4_ib_modify_cq;
	ibdev->ib_dev.destroy_cq	= mlx4_ib_destroy_cq;
	ibdev->ib_dev.poll_cq		= mlx4_ib_poll_cq;
	ibdev->ib_dev.req_notify_cq	= mlx4_ib_arm_cq;
	ibdev->ib_dev.get_dma_mr	= mlx4_ib_get_dma_mr;
	ibdev->ib_dev.reg_user_mr	= mlx4_ib_reg_user_mr;
	ibdev->ib_dev.dereg_mr		= mlx4_ib_dereg_mr;
	ibdev->ib_dev.attach_mcast	= mlx4_ib_mcg_attach;
	ibdev->ib_dev.detach_mcast	= mlx4_ib_mcg_detach;
	ibdev->ib_dev.process_mad	= mlx4_ib_process_mad;

	ibdev->ib_dev.alloc_fmr		= mlx4_ib_fmr_alloc;
	ibdev->ib_dev.map_phys_fmr	= mlx4_ib_map_phys_fmr;
	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
	/* Extension callbacks (WinOF-specific 'x' table). */
	ibdev->ib_dev.x.find_cached_gid		= ib_find_cached_gid;
	ibdev->ib_dev.x.find_cached_pkey	= ib_find_cached_pkey;
	ibdev->ib_dev.x.get_cached_gid		= ib_get_cached_gid;
	ibdev->ib_dev.x.get_cached_pkey		= ib_get_cached_pkey;
	ibdev->ib_dev.x.register_ev_cb		= mlx4_reset_cb_register;
	ibdev->ib_dev.x.unregister_ev_cb	= mlx4_reset_cb_unregister;
#if 1//WORKAROUND_POLL_EQ
	ibdev->ib_dev.x.poll_eq				= mlx4_poll_eq;
#endif
	/* Livefish mode: register the bare device, no HW resources. */
	if (mlx4_is_livefish(ibdev->dev)) {
		if (ib_register_device(&ibdev->ib_dev))
			goto err_dealloc;
		return ibdev;
	}

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((u64)ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;

	if (init_node_data(ibdev))
		goto err_map;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);

	if (ib_register_device(&ibdev->ib_dev))
		goto err_map;

	mlx4_dbg(ibdev->dev, "MLX4_BUS: IB interface is ADDED ! \n");

	return ibdev;

	/* Error unwinding: release in reverse order of acquisition. */
err_map:
	iounmap(ibdev->uar_map, PAGE_SIZE);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ibdev->ib_dev.reg_state = IB_DEV_UNINITIALIZED;
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
619
620 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
621 {
622         struct mlx4_ib_dev *ibdev = ibdev_ptr;
623         int p;
624
625         if (mlx4_is_livefish(ibdev->dev)) {
626                 ib_unregister_device(&ibdev->ib_dev);
627                 goto dealloc_dev;
628         }
629         
630         for (p = 1; p <= dev->caps.num_ports; ++p)
631                 mlx4_CLOSE_PORT(dev, p);
632
633         ib_unregister_device(&ibdev->ib_dev);
634         iounmap(ibdev->uar_map,PAGE_SIZE);
635         mlx4_uar_free(dev, &ibdev->priv_uar);
636         mlx4_pd_free(dev, ibdev->priv_pdn);
637 dealloc_dev:    
638         mlx4_dbg(ibdev->dev, "MLX4_BUS: IB interface is REMOVED ! \n");
639         ibdev->ib_dev.reg_state = IB_DEV_UNINITIALIZED;
640         ib_dealloc_device(&ibdev->ib_dev);
641 }
642
643 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
644                           enum mlx4_dev_event event, int subtype,
645                           int port)
646 {
647         struct ib_event ibev;
648
649         UNUSED_PARAM(dev);
650
651         switch (event) {
652         case MLX4_EVENT_TYPE_PORT_CHANGE:
653                 ibev.event = subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
654                         IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
655                 break;
656
657         case MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR:
658                 ibev.event = IB_EVENT_DEVICE_FATAL;
659                 break;
660
661         default:
662                 return;
663         }
664
665         ibev.device           = ibdev_ptr;
666         ibev.element.port_num = (u8)port;
667
668         ib_dispatch_event(&ibev);
669 }
670
/*
 * Callbacks registered with the mlx4 bus driver.  Positional
 * initialization is kept for compiler compatibility; the trailing
 * NULLs are the list linkage, filled in by the bus driver.
 */
static struct mlx4_interface mlx4_ib_interface = {
	mlx4_ib_add,		/* add */
	mlx4_ib_remove,		/* remove */
	mlx4_ib_event,		/* event */
	NULL, NULL			/* list */
};
677
678 int __init mlx4_ib_init(void)
679 {
680         mlx4_ib_qp_init();
681         return mlx4_register_interface(&mlx4_ib_interface);
682 }
683
/*
 * Module exit: unregister this interface from the mlx4 bus driver.
 */
void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
}
688
689
690