fix cleanup flow for memfree devices
[etherboot.git] / src / drivers / net / mlx_ipoib / ib_mt23108.c
1 /*
2   This software is available to you under a choice of one of two
3   licenses.  You may choose to be licensed under the terms of the GNU
4   General Public License (GPL) Version 2, available at
5   <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
6   license, available in the LICENSE.TXT file accompanying this
7   software.  These details are also available at
8   <http://openib.org/license.html>.
9
10   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17   SOFTWARE.
18
19   Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
20 */
21
22 #include "mt23108.h"
23 #include "ib_driver.h"
24 #include "pci.h"
25
/*
 * All DMA-visible buffers used by the driver, gathered into one struct
 * so that a single memory region (one MPT entry, see
 * prep_sw2hw_mpt_buf) can cover all of them.  Each member carries the
 * alignment its hardware consumer requires.
 */
struct device_buffers_st {
        /* receive work queues for the MAD and IPoIB QPs */
        union recv_wqe_u mads_qp_rcv_queue[NUM_MADS_RCV_WQES]
            __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
        union recv_wqe_u ipoib_qp_rcv_queue[NUM_IPOIB_RCV_WQES]
            __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
        /* UD send work queues for the MAD and IPoIB QPs */
        union ud_send_wqe_u mads_qp_snd_queue[NUM_MADS_SND_WQES]
            __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
        union ud_send_wqe_u ipoib_qp_snd_queue[NUM_IPOIB_SND_WQES]
            __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
        /* shared input/output mailboxes for firmware commands */
        u8 inprm_buf[INPRM_BUF_SZ] __attribute__ ((aligned(INPRM_BUF_ALIGN)));
        u8 outprm_buf[OUTPRM_BUF_SZ]
            __attribute__ ((aligned(OUTPRM_BUF_ALIGN)));
        /* the single event queue used for all events */
        struct eqe_t eq_buf[1 << LOG2_EQ_SZ]
            __attribute__ ((aligned(sizeof(struct eqe_t))));
        /* one send and one receive completion queue per QP */
        union cqe_st mads_snd_cq_buf[NUM_MADS_SND_CQES]
            __attribute__ ((aligned(sizeof(union cqe_st))));
        union cqe_st ipoib_snd_cq_buf[NUM_IPOIB_SND_CQES]
            __attribute__ ((aligned(sizeof(union cqe_st))));
        union cqe_st mads_rcv_cq_buf[NUM_MADS_RCV_CQES]
            __attribute__ ((aligned(sizeof(union cqe_st))));
        union cqe_st ipoib_rcv_cq_buf[NUM_IPOIB_RCV_CQES]
            __attribute__ ((aligned(sizeof(union cqe_st))));
        /* pool of UD address vectors, managed as a free list
           (see init_av_array) */
        union ud_av_u av_array[NUM_AVS]
            __attribute__ ((aligned(ADDRESS_VECTOR_ST_ALIGN)));
} __attribute__ ((packed));
51
#define STRUCT_ALIGN_SZ 4096
/* raw size includes worst-case slack so the payload can be aligned
   up to STRUCT_ALIGN_SZ inside src_buf (see init_dev_data) */
#define SRC_BUF_SZ (sizeof(struct device_buffers_st) + STRUCT_ALIGN_SZ - 1)

/* the following must be kept in this order
   for the memory region to cover the buffers */
static u8 src_buf[SRC_BUF_SZ];  /* backing store for dev_buffers_p */
static struct ib_buffers_st ib_buffers;
static __u32 memreg_size;       /* region length, computed in init_dev_data() */
/* end of order constraint */

/* saved PCI state of the HCA (and its bridge, for Tavor) */
static struct dev_pci_struct tavor_pci_dev;
/* aligned view into src_buf holding all DMA buffers */
static struct device_buffers_st *dev_buffers_p;
/* global driver state (QPs, CQs, EQ, mkey, port, ...) */
static struct device_ib_data_st dev_ib_data;
65
66 static int gw_write_cr(__u32 addr, __u32 data)
67 {
68         writel(htonl(data), tavor_pci_dev.cr_space + addr);
69         return 0;
70 }
71
72 static int gw_read_cr(__u32 addr, __u32 * result)
73 {
74         *result = ntohl(readl(tavor_pci_dev.cr_space + addr));
75         return 0;
76 }
77
78 static int reset_hca(void)
79 {
80         return gw_write_cr(TAVOR_RESET_OFFSET, 1);
81 }
82
83 static int find_mlx_bridge(__u8 hca_bus, __u8 * br_bus_p, __u8 * br_devfn_p)
84 {
85         int bus;
86         int dev;
87         int devfn;
88         int rc;
89         __u16 vendor, dev_id;
90         __u8 sec_bus;
91
92         for (bus = 0; bus < 256; ++bus) {
93                 for (dev = 0; dev < 32; ++dev) {
94                         devfn = (dev << 3);
95                         rc = pcibios_read_config_word(bus, devfn, PCI_VENDOR_ID,
96                                                       &vendor);
97                         if (rc)
98                                 return rc;
99
100                         if (vendor != MELLANOX_VENDOR_ID)
101                                 continue;
102
103                         rc = pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID,
104                                                       &dev_id);
105                         if (rc)
106                                 return rc;
107
108                         if (dev_id != TAVOR_BRIDGE_DEVICE_ID)
109                                 continue;
110
111                         rc = pcibios_read_config_byte(bus, devfn,
112                                                       PCI_SECONDARY_BUS,
113                                                       &sec_bus);
114                         if (rc)
115                                 return rc;
116
117                         if (sec_bus == hca_bus) {
118                                 *br_bus_p = bus;
119                                 *br_devfn_p = devfn;
120                                 return 0;
121                         }
122                 }
123         }
124
125         return -1;
126 }
127
/*
 * First-stage device bring-up: record the HCA's BARs and config space,
 * locate and record the Tavor bridge (Tavor only, not Arbel/memfree),
 * and map the CR-space and one UAR page.  The saved config space is
 * replayed by restore_config() after the reset in setup_hca().
 * Returns 0 on success, a pcibios error code or -1 on failure.
 */
static int ib_device_init(struct pci_device *dev)
{
        int i;
        int rc;
        __u8 br_bus, br_devfn;

        tprintf("");

        memset(&dev_ib_data, 0, sizeof dev_ib_data);

        /* save bars */
        tprintf("bus=%d devfn=0x%x", dev->bus, dev->devfn);
        for (i = 0; i < 6; ++i) {
                tavor_pci_dev.dev.bar[i] =
                    pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
                tprintf("bar[%d]= 0x%08lx", i, tavor_pci_dev.dev.bar[i]);
        }

        tprintf("");
        /* save config space (first 64 dwords = 256 bytes) */
        for (i = 0; i < 64; ++i) {
                rc = pci_read_config_dword(dev, i << 2,
                                           &tavor_pci_dev.dev.
                                           dev_config_space[i]);
                if (rc) {
                        eprintf("");
                        return rc;
                }
                tprintf("config[%d]= 0x%08lx", i << 2,
                        tavor_pci_dev.dev.dev_config_space[i]);
        }

        tprintf("");
        tavor_pci_dev.dev.dev = dev;

        tprintf("");
        /* Tavor sits behind a dedicated Mellanox bridge whose config
           space must also be saved/restored across the reset */
        if (dev->dev_id == TAVOR_DEVICE_ID) {

                rc = find_mlx_bridge(dev->bus, &br_bus, &br_devfn);
                if (rc) {
                        eprintf("");
                        return rc;
                }

                tavor_pci_dev.br.bus = br_bus;
                tavor_pci_dev.br.devfn = br_devfn;

                tprintf("bus=%d devfn=0x%x", br_bus, br_devfn);
                /* save config space */
                for (i = 0; i < 64; ++i) {
                        rc = pcibios_read_config_dword(br_bus, br_devfn, i << 2,
                                                       &tavor_pci_dev.br.
                                                       dev_config_space[i]);
                        if (rc) {
                                eprintf("");
                                return rc;
                        }
                        tprintf("config[%d]= 0x%08lx", i << 2,
                                tavor_pci_dev.br.dev_config_space[i]);
                }
        }

        tprintf("");

        /* map cr-space (BAR 0; 1MB window) */
        tavor_pci_dev.cr_space = ioremap(tavor_pci_dev.dev.bar[0], 0x100000);
        if (!tavor_pci_dev.cr_space) {
                eprintf("");
                return -1;
        }

        /* map uar: one 4KB page at index UAR_IDX inside BAR 2 */
        tavor_pci_dev.uar =
            ioremap(tavor_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, 0x1000);
        if (!tavor_pci_dev.uar) {
                eprintf("");
                return -1;
        }
        tprintf("uar_base (pa:va) = 0x%lx 0x%lx",
                tavor_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, tavor_pci_dev.uar);

        tprintf("");

        return 0;
}
213
/* Round 'buf' up to the next multiple of 'align'.
   'align' must be a power of two. */
static inline unsigned long lalign(unsigned long buf, unsigned long align)
{
        unsigned long mask = align - 1;

        return (buf + mask) & ~mask;
}
219
/*
 * Point dev_buffers_p at the STRUCT_ALIGN_SZ-aligned area inside
 * src_buf and compute memreg_size, the length of the memory region
 * that must cover all driver buffers.  Relies on the link-order
 * constraint above: the region spans from the aligned start of
 * src_buf up to the address of memreg_size itself, so it covers
 * src_buf and ib_buffers.
 * NOTE(review): the pointer-to-__u32 casts assume a flat 32-bit
 * address space (true for Etherboot) — confirm if ever ported.
 * Always returns 0.
 */
static int init_dev_data(void)
{
        unsigned long tmp;

        tmp = lalign(virt_to_bus(src_buf), STRUCT_ALIGN_SZ);

        dev_buffers_p = bus_to_virt(tmp);
        memreg_size = (__u32) (&memreg_size) - (__u32) dev_buffers_p;
        tprintf("src_buf=0x%lx, dev_buffers_p=0x%lx, memreg_size=0x%x", src_buf,
                dev_buffers_p, memreg_size);

        return 0;
}
233
/*
 * Replay the PCI config space saved by ib_device_init() after the HCA
 * reset wiped it.  For Tavor the bridge must be restored first so the
 * HCA is reachable again.  Returns 0 on success or the pcibios/pci
 * write error code.
 */
static int restore_config(void)
{
        int i;
        int rc;

        if (tavor_pci_dev.dev.dev->dev_id == TAVOR_DEVICE_ID) {
                /* restore the bridge before touching the HCA behind it */
                for (i = 0; i < 64; ++i) {
                        rc = pcibios_write_config_dword(tavor_pci_dev.br.bus,
                                                        tavor_pci_dev.br.devfn,
                                                        i << 2,
                                                        tavor_pci_dev.br.
                                                        dev_config_space[i]);
                        if (rc) {
                                return rc;
                        }
                }
        }

        for (i = 0; i < 64; ++i) {
                /* skip config dwords 22 and 23 (byte offsets 0x58/0x5c);
                   NOTE(review): presumably registers that must not be
                   rewritten after reset — confirm against the Tavor PRM */
                if (i != 22 && i != 23) {
                        rc = pci_write_config_dword(tavor_pci_dev.dev.dev,
                                                    i << 2,
                                                    tavor_pci_dev.dev.
                                                    dev_config_space[i]);
                        if (rc) {
                                return rc;
                        }
                }
        }
        return 0;
}
265
/*
 * Serialize the INIT_HCA parameters from *init_hca_p into the command
 * mailbox at 'buf', following the Tavor INIT_HCA parameter layout.
 * For each narrow *_base_addr_l field the low address word is shifted
 * right so only the bits above the field's implicit alignment are
 * stored.
 */
static void prep_init_hca_buf(const struct init_hca_st *init_hca_p, void *buf)
{
        /*struct init_hca_param_st */ void *p = buf;
        void *tmp;

        memset(buf, 0, MT_STRUCT_SIZE(tavorprm_init_hca_st));

        /* QPC/EEC/CQC/EQC/RDB context-table section */
        tmp =
            p + MT_BYTE_OFFSET(tavorprm_init_hca_st,
                               qpc_eec_cqc_eqc_rdb_parameters);

        INS_FLD(init_hca_p->qpc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
                qpc_base_addr_h);
        INS_FLD(init_hca_p->
                qpc_base_addr_l >> (32 -
                                    (MT_BIT_SIZE
                                     (tavorprm_qpcbaseaddr_st,
                                      qpc_base_addr_l))), tmp,
                tavorprm_qpcbaseaddr_st, qpc_base_addr_l);
        INS_FLD(init_hca_p->log_num_of_qp, tmp, tavorprm_qpcbaseaddr_st,
                log_num_of_qp);

        INS_FLD(init_hca_p->cqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
                cqc_base_addr_h);
        INS_FLD(init_hca_p->
                cqc_base_addr_l >> (32 -
                                    (MT_BIT_SIZE
                                     (tavorprm_qpcbaseaddr_st,
                                      cqc_base_addr_l))), tmp,
                tavorprm_qpcbaseaddr_st, cqc_base_addr_l);
        INS_FLD(init_hca_p->log_num_of_cq, tmp, tavorprm_qpcbaseaddr_st,
                log_num_of_cq);

        INS_FLD(init_hca_p->eqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
                eqc_base_addr_h);
        INS_FLD(init_hca_p->
                eqc_base_addr_l >> (32 -
                                    (MT_BIT_SIZE
                                     (tavorprm_qpcbaseaddr_st,
                                      eqc_base_addr_l))), tmp,
                tavorprm_qpcbaseaddr_st, eqc_base_addr_l);
        /* number of EQs comes from the driver constant, not the caller */
        INS_FLD(LOG2_EQS, tmp, tavorprm_qpcbaseaddr_st, log_num_eq);

        INS_FLD(init_hca_p->srqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
                srqc_base_addr_h);
        INS_FLD(init_hca_p->
                srqc_base_addr_l >> (32 -
                                     (MT_BIT_SIZE
                                      (tavorprm_qpcbaseaddr_st,
                                       srqc_base_addr_l))), tmp,
                tavorprm_qpcbaseaddr_st, srqc_base_addr_l);
        INS_FLD(init_hca_p->log_num_of_srq, tmp, tavorprm_qpcbaseaddr_st,
                log_num_of_srq);

        INS_FLD(init_hca_p->eqpc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
                eqpc_base_addr_h);
        INS_FLD(init_hca_p->eqpc_base_addr_l, tmp, tavorprm_qpcbaseaddr_st,
                eqpc_base_addr_l);

        INS_FLD(init_hca_p->eeec_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
                eeec_base_addr_h);
        INS_FLD(init_hca_p->eeec_base_addr_l, tmp, tavorprm_qpcbaseaddr_st,
                eeec_base_addr_l);

        /* multicast table section */
        tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, multicast_parameters);

        INS_FLD(init_hca_p->mc_base_addr_h, tmp, tavorprm_multicastparam_st,
                mc_base_addr_h);
        INS_FLD(init_hca_p->mc_base_addr_l, tmp, tavorprm_multicastparam_st,
                mc_base_addr_l);

        INS_FLD(init_hca_p->log_mc_table_entry_sz, tmp,
                tavorprm_multicastparam_st, log_mc_table_entry_sz);
        INS_FLD(init_hca_p->log_mc_table_sz, tmp, tavorprm_multicastparam_st,
                log_mc_table_sz);
        INS_FLD(init_hca_p->mc_table_hash_sz, tmp, tavorprm_multicastparam_st,
                mc_table_hash_sz);

        /* translation and protection table (MPT/MTT) section */
        tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, tpt_parameters);

        INS_FLD(init_hca_p->mpt_base_addr_h, tmp, tavorprm_tptparams_st,
                mpt_base_adr_h);
        INS_FLD(init_hca_p->mpt_base_addr_l, tmp, tavorprm_tptparams_st,
                mpt_base_adr_l);
        INS_FLD(init_hca_p->log_mpt_sz, tmp, tavorprm_tptparams_st, log_mpt_sz);

        INS_FLD(init_hca_p->mtt_base_addr_h, tmp, tavorprm_tptparams_st,
                mtt_base_addr_h);
        INS_FLD(init_hca_p->mtt_base_addr_l, tmp, tavorprm_tptparams_st,
                mtt_base_addr_l);

        /* UAR section: physical UAR base from BARs 3 (high) and 2 (low) */
        tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, uar_parameters);
        INS_FLD(tavor_pci_dev.dev.bar[3], tmp, tavorprm_uar_params_st,
                uar_base_addr_h);
        INS_FLD(tavor_pci_dev.dev.bar[2] & 0xfff00000, tmp,
                tavorprm_uar_params_st, uar_base_addr_l);

}
364
/*
 * Build the SW2HW_MPT mailbox for the single memory region the driver
 * uses: local read/write enabled, physical addressing (no translation),
 * key 'mkey', covering memreg_size bytes starting at dev_buffers_p.
 */
static void prep_sw2hw_mpt_buf(void *buf, __u32 mkey)
{
        INS_FLD(1, buf, tavorprm_mpt_st, m_io);
        INS_FLD(1, buf, tavorprm_mpt_st, lw);   /* local write */
        INS_FLD(1, buf, tavorprm_mpt_st, lr);   /* local read */
        INS_FLD(1, buf, tavorprm_mpt_st, pa);   /* physical addresses, no MTT walk */
        INS_FLD(1, buf, tavorprm_mpt_st, r_w);  /* NOTE(review): presumably region vs window — confirm */

        INS_FLD(mkey, buf, tavorprm_mpt_st, mem_key);
        INS_FLD(GLOBAL_PD, buf, tavorprm_mpt_st, pd);

        /* only the low 32 bits of start address and length are set */
        INS_FLD(virt_to_bus(dev_buffers_p), buf, tavorprm_mpt_st,
                start_address_l);
        INS_FLD(memreg_size, buf, tavorprm_mpt_st, reg_wnd_len_l);
}
380
/*
 * Build the SW2HW_EQ mailbox describing the event queue whose buffer
 * is 'eq': armed/"fired" state, LOG2_EQ_SZ entries, posted through
 * UAR_IDX, protected by GLOBAL_PD and the driver's single mkey.
 */
static void prep_sw2hw_eq_buf(void *buf, struct eqe_t *eq)
{
        memset(buf, 0, MT_STRUCT_SIZE(tavorprm_eqc_st));

        INS_FLD(2, buf, tavorprm_eqc_st, st); /* fired */
        /* only the low 32 bits of the buffer address are set */
        INS_FLD(virt_to_bus(eq), buf, tavorprm_eqc_st, start_address_l);
        INS_FLD(LOG2_EQ_SZ, buf, tavorprm_eqc_st, log_eq_size);
        INS_FLD(UAR_IDX, buf, tavorprm_eqc_st, usr_page);
        INS_FLD(GLOBAL_PD, buf, tavorprm_eqc_st, pd);
        INS_FLD(dev_ib_data.mkey, buf, tavorprm_eqc_st, lkey);
}
392
393 static void init_eq_buf(void *eq_buf)
394 {
395         int num_eqes = 1 << LOG2_EQ_SZ;
396
397         memset(eq_buf, 0xff, num_eqes * sizeof(struct eqe_t));
398 }
399
/*
 * Build the INIT_IB mailbox as raw dwords.
 * NOTE(review): the constants are undocumented here — presumably port
 * capability flags / MTU-width caps (0x4310), VL cap (1) and max GID
 * table size (64); confirm against the Tavor INIT_IB parameter layout
 * before changing.
 */
static void prep_init_ib_buf(void *buf)
{
        __u32 *ptr = (__u32 *) buf;

        ptr[0] = 0x4310;
        ptr[1] = 1;
        ptr[2] = 64;
}
408
/*
 * Build the SW2HW_CQ mailbox as raw dwords for CQ number 'cqn' backed
 * by 'cq_buf' and reporting to event queue 'eqn'.
 * NOTE(review): the dword offsets are assumed to follow the Tavor CQ
 * context layout (start address, size+UAR, event EQs, PD, L_Key, CQN)
 * — confirm against the PRM before changing.
 */
static void prep_sw2hw_cq_buf(void *buf, __u8 eqn, __u32 cqn,
                              union cqe_st *cq_buf)
{
        __u32 *ptr = (__u32 *) buf;

        ptr[2] = virt_to_bus(cq_buf);           /* buffer start (low 32 bits) */
        ptr[3] = (LOG2_CQ_SZ << 24) | UAR_IDX;  /* log2 size + usr_page */
        ptr[4] = eqn;                           /* completion events */
        ptr[5] = eqn;                           /* error events (same EQ) */
        ptr[6] = dev_ib_data.pd;
        ptr[7] = dev_ib_data.mkey;              /* l_key covering cq_buf */
        ptr[12] = cqn;
}
422
/*
 * Build the RST->INIT transition mailbox for a UD QP: service type UD,
 * CQE generation on both send and receive, completion queues
 * 'snd_cqn'/'rcv_cqn', Q_Key 'qkey', and the driver's port, PD, mkey
 * and UAR.
 */
static void prep_rst2init_qpee_buf(void *buf, __u32 snd_cqn, __u32 rcv_cqn,
                                   __u32 qkey)
{
        struct qp_ee_state_tarnisition_st *prm;
        void *tmp;

        prm = (struct qp_ee_state_tarnisition_st *)buf;

        INS_FLD(3, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, st);     /* service type = UD */
        INS_FLD(3, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, pm_state);       /* required for UD QP */
        INS_FLD(UAR_IDX, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
                usr_page);
        INS_FLD(dev_ib_data.pd, &prm->ctx,
                tavorprm_queue_pair_ee_context_entry_st, pd);
        INS_FLD(dev_ib_data.mkey, &prm->ctx,
                tavorprm_queue_pair_ee_context_entry_st, wqe_lkey);
        INS_FLD(1, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, ssc);    /* generate send CQE */
        INS_FLD(1, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, rsc);    /* generate receive CQE */
        INS_FLD(snd_cqn, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
                cqn_snd);
        INS_FLD(rcv_cqn, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
                cqn_rcv);
        INS_FLD(qkey, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
                q_key);

        /* the port number lives inside the primary address path */
        tmp =
            (void *)(&prm->ctx) +
            MT_BYTE_OFFSET(tavorprm_queue_pair_ee_context_entry_st,
                           primary_address_path);
        INS_FLD(dev_ib_data.port, tmp, tavorprm_address_path_st, port_number);

        /* NOTE(review): MTU code 4 and msg_max 0xb — presumably
           2048-byte MTU and 2^11 max message; confirm in the PRM */
        INS_FLD(4, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, mtu);
        INS_FLD(0xb, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
                msg_max);
}
458
/*
 * Build the INIT->RTR transition mailbox: only the MTU and max message
 * size fields are set (same values as in prep_rst2init_qpee_buf).
 */
static void prep_init2rtr_qpee_buf(void *buf)
{
        struct qp_ee_state_tarnisition_st *prm;

        prm = (struct qp_ee_state_tarnisition_st *)buf;

        INS_FLD(4, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, mtu);
        INS_FLD(0xb, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
                msg_max);
}
469
470 static void init_av_array()
471 {
472         int i;
473
474         dev_ib_data.udav.av_array = dev_buffers_p->av_array;
475         dev_ib_data.udav.udav_next_free = FL_EOL;
476         for (i = 0; i < NUM_AVS; ++i) {
477                 dev_ib_data.udav.av_array[i].ud_av.next_free =
478                     dev_ib_data.udav.udav_next_free;
479                 dev_ib_data.udav.udav_next_free = i;
480         }
481         tprintf("dev_ib_data.udav.udav_next_free=%d", i);
482 }
483
484 static int setup_hca(__u8 port, void **eq_p)
485 {
486         int rc;
487         __u32 key, in_key;
488         __u32 *inprm;
489         struct eqe_t *eq_buf;
490         __u32 event_mask;
491         void *cfg;
492         int ret = 0;
493         __u8 eqn;
494         struct dev_lim_st dev_lim;
495         struct init_hca_st init_hca;
496         __u32 offset, base_h, base_l;
497         const __u32 delta = 0x400000;
498         struct query_fw_st qfw;
499
500         tprintf("called");
501
502         init_dev_data();
503
504         rc = reset_hca();
505         if (rc) {
506                 ret = -1;
507                 eprintf("");
508                 goto exit;
509         } else {
510                 tprintf("reset_hca() success");
511         }
512
513         mdelay(1000);           /* wait for 1 sec */
514
515         rc = restore_config();
516         if (rc) {
517                 ret = -1;
518                 eprintf("");
519                 goto exit;
520         } else {
521                 tprintf("restore_config() success");
522         }
523
524         dev_ib_data.pd = GLOBAL_PD;
525         dev_ib_data.port = port;
526
527         /* execute system enable command */
528         rc = cmd_sys_en();
529         if (rc) {
530                 ret = -1;
531                 eprintf("");
532                 goto exit;
533         } else {
534                 tprintf("cmd_sys_en() success");
535         }
536
537         rc= cmd_query_fw(&qfw);
538         if (rc) {
539                 ret = -1;
540                 eprintf("");
541                 goto exit;
542         } else {
543                 tprintf("cmd_query_fw() success");
544
545                 if (print_info) {
546                         printf("FW ver = %d.%d.%d\n",
547                                 qfw.fw_rev_major,
548                                 qfw.fw_rev_minor,
549                                 qfw.fw_rev_subminor);
550                 }
551                 tprintf("fw_rev_major=%d", qfw.fw_rev_major);
552                 tprintf("fw_rev_minor=%d", qfw.fw_rev_minor);
553                 tprintf("fw_rev_subminor=%d", qfw.fw_rev_subminor);
554                 tprintf("error_buf_start_h=0x%x", qfw.error_buf_start_h);
555                 tprintf("error_buf_start_l=0x%x", qfw.error_buf_start_l);
556                 tprintf("error_buf_size=%d", qfw.error_buf_size);
557         }
558
559         if (qfw.error_buf_start_h) {
560                 eprintf("too high physical address");
561                 ret = -1;
562                 goto exit;
563         }
564
565         dev_ib_data.error_buf_addr= ioremap(qfw.error_buf_start_l,
566                                                                                 qfw.error_buf_size*4);
567         dev_ib_data.error_buf_size= qfw.error_buf_size;
568         if (!dev_ib_data.error_buf_addr) {
569                 eprintf("");
570                 ret = -1;
571                 goto exit;
572         }
573
574
575         rc = cmd_query_dev_lim(&dev_lim);
576         if (rc) {
577                 ret = -1;
578                 eprintf("");
579                 goto exit;
580         } else {
581                 tprintf("cmd_query_dev_lim() success");
582                 tprintf("log2_rsvd_qps=%x", dev_lim.log2_rsvd_qps);
583                 tprintf("qpc_entry_sz=%x", dev_lim.qpc_entry_sz);
584                 tprintf("log2_rsvd_srqs=%x", dev_lim.log2_rsvd_srqs);
585                 tprintf("srq_entry_sz=%x", dev_lim.srq_entry_sz);
586                 tprintf("log2_rsvd_ees=%x", dev_lim.log2_rsvd_ees);
587                 tprintf("eec_entry_sz=%x", dev_lim.eec_entry_sz);
588                 tprintf("log2_rsvd_cqs=%x", dev_lim.log2_rsvd_cqs);
589                 tprintf("cqc_entry_sz=%x", dev_lim.cqc_entry_sz);
590                 tprintf("log2_rsvd_mtts=%x", dev_lim.log2_rsvd_mtts);
591                 tprintf("mtt_entry_sz=%x", dev_lim.mtt_entry_sz);
592                 tprintf("log2_rsvd_mrws=%x", dev_lim.log2_rsvd_mrws);
593                 tprintf("mpt_entry_sz=%x", dev_lim.mpt_entry_sz);
594                 tprintf("eqc_entry_sz=%x", dev_lim.eqc_entry_sz);
595         }
596
597         /* set the qp and cq numbers according
598            to the results of query_dev_lim */
599         dev_ib_data.mads_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
600             +QPN_BASE + MADS_QPN_SN;
601         dev_ib_data.ipoib_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
602             +QPN_BASE + IPOIB_QPN_SN;
603
604         dev_ib_data.mads_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
605             MADS_SND_CQN_SN;
606         dev_ib_data.mads_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
607             MADS_RCV_CQN_SN;
608
609         dev_ib_data.ipoib_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
610             IPOIB_SND_CQN_SN;
611         dev_ib_data.ipoib_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
612             IPOIB_RCV_CQN_SN;
613
614         /* disable SRQ */
615         cfg = (void *)dev_buffers_p->inprm_buf;
616         memset(cfg, 0, MT_STRUCT_SIZE(tavorprm_mod_stat_cfg_st));
617         INS_FLD(1, cfg, tavorprm_mod_stat_cfg_st, srq_m);       //cfg->srq_m = 1;
618         rc = cmd_mod_stat_cfg(cfg);
619         if (rc) {
620                 ret = -1;
621                 eprintf("");
622                 goto exit;
623         } else {
624                 tprintf("cmd_mod_stat_cfg() success");
625         }
626
627         /* prepare the init_hca params to pass
628            to prep_init_hca_buf */
629         memset(&init_hca, 0, sizeof init_hca);
630         offset = 0;
631         base_h = tavor_pci_dev.dev.bar[5] & 0xfffffff0;
632         base_l = tavor_pci_dev.dev.bar[4] & 0xfffffff0;
633
634         tprintf("base_h=0x%lx, base_l=0x%lx", base_h, base_l);
635
636         init_hca.qpc_base_addr_h = base_h;
637         init_hca.qpc_base_addr_l = base_l + offset;
638         init_hca.log_num_of_qp = dev_lim.log2_rsvd_qps + 1;
639         offset += delta;
640
641         init_hca.eec_base_addr_h = base_h;
642         init_hca.eec_base_addr_l = base_l + offset;
643         init_hca.log_num_of_ee = dev_lim.log2_rsvd_ees;
644         offset += delta;
645
646         init_hca.srqc_base_addr_h = base_h;
647         init_hca.srqc_base_addr_l = base_l + offset;
648         init_hca.log_num_of_srq = dev_lim.log2_rsvd_srqs;
649         offset += delta;
650
651         init_hca.cqc_base_addr_h = base_h;
652         init_hca.cqc_base_addr_l = base_l + offset;
653         init_hca.log_num_of_cq = dev_lim.log2_rsvd_cqs + 1;
654         offset += delta;
655
656         init_hca.eqpc_base_addr_h = base_h;
657         init_hca.eqpc_base_addr_l = base_l + offset;
658         offset += delta;
659
660         init_hca.eeec_base_addr_h = base_h;
661         init_hca.eeec_base_addr_l = base_l + offset;
662         offset += delta;
663
664         init_hca.eqc_base_addr_h = base_h;
665         init_hca.eqc_base_addr_l = base_l + offset;
666         init_hca.log_num_of_eq = LOG2_EQS;
667         offset += delta;
668
669         init_hca.rdb_base_addr_h = base_h;
670         init_hca.rdb_base_addr_l = base_l + offset;
671         offset += delta;
672
673         init_hca.mc_base_addr_h = base_h;
674         init_hca.mc_base_addr_l = base_l + offset;
675         init_hca.log_mc_table_entry_sz = LOG2_MC_ENTRY;
676         init_hca.mc_table_hash_sz = 0;
677         init_hca.log_mc_table_sz = LOG2_MC_GROUPS;
678         offset += delta;
679
680         init_hca.mpt_base_addr_h = base_h;
681         init_hca.mpt_base_addr_l = base_l + offset;
682         init_hca.log_mpt_sz = dev_lim.log2_rsvd_mrws + 1;
683         offset += delta;
684
685         init_hca.mtt_base_addr_h = base_h;
686         init_hca.mtt_base_addr_l = base_l + offset;
687
688         /* this buffer is used for all the commands */
689         inprm = (void *)dev_buffers_p->inprm_buf;
690         /* excute init_hca command */
691         prep_init_hca_buf(&init_hca, inprm);
692
693         rc = cmd_init_hca(inprm, MT_STRUCT_SIZE(tavorprm_init_hca_st));
694         if (rc) {
695                 ret = -1;
696                 eprintf("");
697                 goto undo_sys_en;
698         } else
699                 tprintf("cmd_init_hca() success");
700
701         /* register a single memory region which covers
702            4 GB of the address space which will be used
703            throughout the driver */
704         memset(inprm, 0, SW2HW_MPT_IBUF_SZ);
705         in_key = MKEY_PREFIX + (1 << dev_lim.log2_rsvd_mrws);
706         prep_sw2hw_mpt_buf(inprm, in_key);
707         rc = cmd_sw2hw_mpt(&key, in_key, inprm, SW2HW_MPT_IBUF_SZ);
708         if (rc) {
709                 ret = -1;
710                 eprintf("");
711                 goto undo_init_hca;
712         } else {
713                 tprintf("cmd_sw2hw_mpt() success, key=0x%lx", key);
714         }
715         dev_ib_data.mkey = key;
716
717         eqn = EQN;
718         /* allocate a single EQ which will receive 
719            all the events */
720         eq_buf = dev_buffers_p->eq_buf;
721         init_eq_buf(eq_buf);    /* put in HW ownership */
722         prep_sw2hw_eq_buf(inprm, eq_buf);
723         rc = cmd_sw2hw_eq(SW2HW_EQ_IBUF_SZ);
724         if (rc) {
725                 ret = -1;
726                 eprintf("");
727                 goto undo_sw2hw_mpt;
728         } else
729                 tprintf("cmd_sw2hw_eq() success");
730
731         event_mask = (1 << XDEV_EV_TYPE_CQ_COMP) |
732             (1 << XDEV_EV_TYPE_CQ_ERR) |
733             (1 << XDEV_EV_TYPE_LOCAL_WQ_CATAS_ERR) |
734             (1 << XDEV_EV_TYPE_PORT_ERR) |
735             (1 << XDEV_EV_TYPE_LOCAL_WQ_INVALID_REQ_ERR) |
736             (1 << XDEV_EV_TYPE_LOCAL_WQ_ACCESS_VIOL_ERR) |
737             (1 << TAVOR_IF_EV_TYPE_OVERRUN);
738         rc = cmd_map_eq(eqn, event_mask, 1);
739         if (rc) {
740                 ret = -1;
741                 eprintf("");
742                 goto undo_sw2hw_eq;
743         } else
744                 tprintf("cmd_map_eq() success");
745
746         dev_ib_data.eq.eqn = eqn;
747         dev_ib_data.eq.eq_buf = eq_buf;
748         dev_ib_data.eq.cons_idx = 0;
749         dev_ib_data.eq.eq_size = 1 << LOG2_EQ_SZ;
750         *eq_p = &dev_ib_data.eq;
751
752         memset(inprm, 0, INIT_IB_IBUF_SZ);
753         prep_init_ib_buf(inprm);
754         rc = cmd_init_ib(port, inprm, INIT_IB_IBUF_SZ);
755         if (rc) {
756                 ret = -1;
757                 eprintf("");
758                 goto undo_sw2hw_eq;
759         } else
760                 tprintf("cmd_init_ib() success");
761
762         init_av_array();
763         tprintf("init_av_array() done");
764
765         goto exit;
766
767       undo_sw2hw_eq:
768         rc = cmd_hw2sw_eq(EQN);
769         if (rc) {
770                 eprintf("");
771         } else
772                 tprintf("cmd_hw2sw_eq() success");
773
774       undo_sw2hw_mpt:
775         rc = cmd_hw2sw_mpt(key);
776         if (rc)
777                 eprintf("");
778         else
779                 tprintf("cmd_hw2sw_mpt() success key=0x%lx", key);
780
781       undo_init_hca:
782         rc = cmd_close_hca(0);
783         if (rc) {
784                 eprintf("");
785                 goto undo_sys_en;
786         } else
787                 tprintf("cmd_close_hca() success");
788
789       undo_sys_en:
790         rc = cmd_sys_dis();
791         if (rc) {
792                 eprintf("");
793                 goto undo_sys_en;
794         } else
795                 tprintf("cmd_sys_dis() success");
796         goto exit;
797
798       exit:
799         return ret;
800 }
801
802
803 static int unset_hca(void)
804 {
805         int rc = 0;
806
807         if (!fw_fatal) {
808                 rc = cmd_sys_dis();
809                 if (rc)
810                         eprintf("");
811         }
812
813         return rc;
814 }
815
/* Return the shared command input mailbox inside the device buffers. */
static void *get_inprm_buf(void)
{
        return dev_buffers_p->inprm_buf;
}
820
/* Return the shared command output mailbox inside the device buffers. */
static void *get_outprm_buf(void)
{
        return dev_buffers_p->outprm_buf;
}
825
/*
 * Return the virtual address of data segment 'index' of a UD send WQE.
 * NOTE(review): unlike get_rcv_wqe_buf(), no be32_to_cpu() is applied
 * here — presumably send WQEs are kept in CPU byte order until posted;
 * confirm against the send-WQE build path.
 */
static void *get_send_wqe_buf(void *wqe, __u8 index)
{
        struct ud_send_wqe_st *snd_wqe = wqe;

        return bus_to_virt(snd_wqe->mpointer[index].local_addr_l);
}
832
833 static void *get_rcv_wqe_buf(void *wqe, __u8 index)
834 {
835         struct recv_wqe_st *rcv_wqe = wqe;
836
837         return bus_to_virt(be32_to_cpu(rcv_wqe->mpointer[index].local_addr_l));
838 }
839
/*
 * modify_av_params
 *
 * (Re)builds the UD address vector 'av' for destination 'dlid'/'qpn'.
 * g != 0 requests a GRH; 'gid' (may be NULL) supplies the remote GID.
 * rate >= 3 selects no static rate limit (4x link), otherwise the
 * rate is limited to 1x.
 */
static void modify_av_params(struct ud_av_st *av,
			     __u16 dlid,
			     __u8 g,
			     __u8 sl, __u8 rate, union ib_gid_u *gid, __u32 qpn)
{
	memset(&av->av, 0, sizeof av->av);
	INS_FLD(dev_ib_data.port, &av->av, tavorprm_ud_address_vector_st,
		port_number);
	INS_FLD(dev_ib_data.pd, &av->av, tavorprm_ud_address_vector_st, pd);
	INS_FLD(dlid, &av->av, tavorprm_ud_address_vector_st, rlid);
	INS_FLD(g, &av->av, tavorprm_ud_address_vector_st, g);
	INS_FLD(sl, &av->av, tavorprm_ud_address_vector_st, sl);
	/* max message size field; meaning of the constant 3 is per the
	   Tavor PRM field encoding */
	INS_FLD(3, &av->av, tavorprm_ud_address_vector_st, msg);

	if (rate >= 3)
		INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, max_stat_rate);	/* 4x */
	else
		INS_FLD(1, &av->av, tavorprm_ud_address_vector_st, max_stat_rate);	/* 1x */

	/* convert the AV to big endian BEFORE the GID dwords go in --
	   NOTE(review): presumably because gid->raw is already in network
	   (big-endian) order and must not be swapped; confirm */
	cpu_to_be_buf(&av->av, sizeof(av->av));
	if (g) {
		if (gid) {
			INS_FLD(*((__u32 *) (&gid->raw[0])), &av->av,
				tavorprm_ud_address_vector_st, rgid_127_96);
			INS_FLD(*((__u32 *) (&gid->raw[4])), &av->av,
				tavorprm_ud_address_vector_st, rgid_95_64);
			INS_FLD(*((__u32 *) (&gid->raw[8])), &av->av,
				tavorprm_ud_address_vector_st, rgid_63_32);
			INS_FLD(*((__u32 *) (&gid->raw[12])), &av->av,
				tavorprm_ud_address_vector_st, rgid_31_0);
		} else {
			INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
				rgid_127_96);
			INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
				rgid_95_64);
			INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
				rgid_63_32);
			INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
				rgid_31_0);
		}
	} else {
		INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_127_96);
		INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_95_64);
		INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_63_32);
		/* NOTE(review): when no GRH is used the low GID dword is set
		   to 2 -- apparently a PRM requirement; confirm */
		INS_FLD(2, &av->av, tavorprm_ud_address_vector_st, rgid_31_0);
	}
	av->dest_qp = qpn;
}
888
889 static void init_cq_buf(union cqe_st *cq_buf, __u8 num_cqes)
890 {
891         memset(cq_buf, 0xff, num_cqes * sizeof cq_buf[0]);
892 }
893
/*
 * post_rcv_buf
 *
 * Posts a single receive WQE to the given UD QP by ringing the
 * receive doorbell, and links it to the previously posted WQE (if
 * any) so HW sees a chained receive queue.
 *
 * qp      - the UD QP to post to
 * rcv_wqe - the WQE to post (already big-endian, see alloc_rcv_wqe)
 *
 * Returns the status of cmd_post_doorbell() (0 on success).
 */
static int post_rcv_buf(struct udqp_st *qp, struct recv_wqe_st *rcv_wqe)
{
	struct recv_doorbell_st dbell;
	int rc;
	__u32 tmp[2];
	struct recv_wqe_st *tmp_wqe = (struct recv_wqe_st *)tmp;
	__u32 *ptr_dst;

	/* doorbell: descriptor size (16-byte units), address (64-byte
	   aligned), QP number, one credit */
	memset(&dbell, 0, sizeof dbell);
	INS_FLD(sizeof(*rcv_wqe) >> 4, &dbell, tavorprm_receive_doorbell_st,
		nds);
	INS_FLD(virt_to_bus(rcv_wqe) >> 6, &dbell, tavorprm_receive_doorbell_st,
		nda);
	INS_FLD(qp->qpn, &dbell, tavorprm_receive_doorbell_st, qpn);
	INS_FLD(1, &dbell, tavorprm_receive_doorbell_st, credits);

	if (qp->last_posted_rcv_wqe) {
		/* patch the 'next' segment of the previously posted WQE to
		   point at the new one: edit a CPU-order copy of its first
		   two dwords, then write them back in one shot */
		memcpy(tmp, qp->last_posted_rcv_wqe, sizeof(tmp));
		be_to_cpu_buf(tmp, sizeof(tmp));
		INS_FLD(1, tmp_wqe->next, wqe_segment_next_st, dbd);
		INS_FLD(sizeof(*rcv_wqe) >> 4, tmp_wqe->next,
			wqe_segment_next_st, nds);
		INS_FLD(virt_to_bus(rcv_wqe) >> 6, tmp_wqe->next,
			wqe_segment_next_st, nda_31_6);
		/* this is not really opcode but since the struct
		   is used for both send and receive, in receive this bit must be 1
		   which coinsides with nopcode */
		INS_FLD(1, tmp_wqe->next, wqe_segment_next_st, nopcode);

		cpu_to_be_buf(tmp, sizeof(tmp));

		ptr_dst = (__u32 *) (qp->last_posted_rcv_wqe);
		ptr_dst[0] = tmp[0];
		ptr_dst[1] = tmp[1];
	}
	rc = cmd_post_doorbell(&dbell, POST_RCV_OFFSET);
	if (!rc) {
		/* remember the tail so the next post can chain to it */
		qp->last_posted_rcv_wqe = rcv_wqe;
	}

	return rc;
}
936
/*
 * post_send_req
 *
 * Posts a send WQE to a UD QP: rings the send doorbell and, if a WQE
 * was posted previously, chains its 'next' segment to the new one so
 * HW can continue fetching descriptors.
 *
 * qph        - the QP handle
 * wqeh       - the (already big-endian) send WQE
 * num_gather - number of valid gather entries in the WQE
 *
 * Returns the status of cmd_post_doorbell() (0 on success).
 */
static int post_send_req(void *qph, void *wqeh, __u8 num_gather)
{
	struct send_doorbell_st dbell;
	int rc;
	struct udqp_st *qp = qph;
	struct ud_send_wqe_st *snd_wqe = wqeh;
	struct next_control_seg_st tmp;
	__u32 *psrc, *pdst;
	__u32 nds;

	tprintf("snd_wqe=0x%lx, virt_to_bus(snd_wqe)=0x%lx", snd_wqe,
		virt_to_bus(snd_wqe));

	memset(&dbell, 0, sizeof dbell);
	INS_FLD(XDEV_NOPCODE_SEND, &dbell, tavorprm_send_doorbell_st, nopcode);
	INS_FLD(1, &dbell, tavorprm_send_doorbell_st, f);
	INS_FLD(virt_to_bus(snd_wqe) >> 6, &dbell, tavorprm_send_doorbell_st,
		nda);
	/* descriptor size in 16-byte units: next + UD segments plus only
	   the gather entries actually used */
	nds =
	    (sizeof(snd_wqe->next) + sizeof(snd_wqe->udseg) +
	     sizeof(snd_wqe->mpointer[0]) * num_gather) >> 4;
	INS_FLD(nds, &dbell, tavorprm_send_doorbell_st, nds);
	INS_FLD(qp->qpn, &dbell, tavorprm_send_doorbell_st, qpn);

	tprintf("0= %lx", ((__u32 *) ((void *)(&dbell)))[0]);
	tprintf("1= %lx", ((__u32 *) ((void *)(&dbell)))[1]);

	if (qp->last_posted_snd_wqe) {
		/* link the previous WQE to this one: edit a CPU-order copy
		   of its next/control segment, write it back big-endian */
		memcpy(&tmp, &qp->last_posted_snd_wqe->next, sizeof tmp);
		be_to_cpu_buf(&tmp, sizeof tmp);
		INS_FLD(1, &tmp, wqe_segment_next_st, dbd);
		INS_FLD(virt_to_bus(snd_wqe) >> 6, &tmp, wqe_segment_next_st,
			nda_31_6);
		INS_FLD(nds, &tmp, wqe_segment_next_st, nds);

		psrc = (__u32 *) (&tmp);
		pdst = (__u32 *) (&qp->last_posted_snd_wqe->next);
		pdst[0] = htonl(psrc[0]);
		pdst[1] = htonl(psrc[1]);
	}

	rc = cmd_post_doorbell(&dbell, POST_SND_OFFSET);
	if (!rc) {
		/* remember the tail so the next post can chain to it */
		qp->last_posted_snd_wqe = snd_wqe;
	}

	return rc;
}
985
986 static int create_mads_qp(void **qp_pp, void **snd_cq_pp, void **rcv_cq_pp)
987 {
988         __u8 i;
989         int rc;
990         struct udqp_st *qp;
991
992         qp = &dev_ib_data.mads_qp;
993
994         /* set the pointer to the receive WQEs buffer */
995         qp->rcv_wq = dev_buffers_p->mads_qp_rcv_queue;
996
997         qp->send_buf_sz = MAD_BUF_SZ;
998         qp->rcv_buf_sz = MAD_BUF_SZ;
999
1000         qp->recv_wqe_alloc_idx = 0;
1001         qp->max_recv_wqes = NUM_MADS_RCV_WQES;
1002         qp->recv_wqe_cur_free = NUM_MADS_RCV_WQES;
1003
1004         /* iterrate through the list */
1005         for (i = 0; i < NUM_MADS_RCV_WQES; ++i) {
1006                 /* clear the WQE */
1007                 memset(&qp->rcv_wq[i], 0, sizeof(qp->rcv_wq[i]));
1008
1009                 qp->rcv_wq[i].wqe_cont.qp = qp;
1010                 qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
1011         }
1012
1013         /* set the pointer to the send WQEs buffer */
1014         qp->snd_wq = dev_buffers_p->mads_qp_snd_queue;
1015
1016         qp->snd_wqe_alloc_idx = 0;
1017         qp->max_snd_wqes = NUM_MADS_SND_WQES;
1018         qp->snd_wqe_cur_free = NUM_MADS_SND_WQES;
1019
1020         /* iterrate through the list */
1021         for (i = 0; i < NUM_MADS_SND_WQES; ++i) {
1022                 /* clear the WQE */
1023                 memset(&qp->snd_wq[i], 0, sizeof(qp->snd_wq[i]));
1024
1025                 /* link the WQE to the free list */
1026                 qp->snd_wq[i].wqe_cont.qp = qp;
1027                 qp->snd_bufs[i] = ib_buffers.send_mad_buf[i];
1028         }
1029
1030         /* qp number and cq numbers are already set up */
1031         qp->snd_cq.cq_buf = dev_buffers_p->mads_snd_cq_buf;
1032         qp->rcv_cq.cq_buf = dev_buffers_p->mads_rcv_cq_buf;
1033         qp->snd_cq.num_cqes = NUM_MADS_SND_CQES;
1034         qp->rcv_cq.num_cqes = NUM_MADS_RCV_CQES;
1035         qp->qkey = GLOBAL_QKEY;
1036         rc = create_udqp(qp);
1037         if (!rc) {
1038                 *qp_pp = qp;
1039                 *snd_cq_pp = &qp->snd_cq;
1040                 *rcv_cq_pp = &qp->rcv_cq;
1041         }
1042
1043         return rc;
1044 }
1045
1046 static int create_ipoib_qp(void **qp_pp,
1047                            void **snd_cq_pp, void **rcv_cq_pp, __u32 qkey)
1048 {
1049         __u8 i;
1050         int rc;
1051         struct udqp_st *qp;
1052         qp = &dev_ib_data.ipoib_qp;
1053
1054         /* set the pointer to the receive WQEs buffer */
1055         qp->rcv_wq = dev_buffers_p->ipoib_qp_rcv_queue;
1056
1057         qp->rcv_buf_sz = IPOIB_RCV_BUF_SZ;
1058
1059         qp->recv_wqe_alloc_idx = 0;
1060         qp->max_recv_wqes = NUM_IPOIB_RCV_WQES;
1061         qp->recv_wqe_cur_free = NUM_IPOIB_RCV_WQES;
1062
1063         /* iterrate through the list */
1064         for (i = 0; i < NUM_IPOIB_RCV_WQES; ++i) {
1065                 /* clear the WQE */
1066                 memset(&qp->rcv_wq[i], 0, sizeof(qp->rcv_wq[i]));
1067
1068                 /* update data */
1069                 qp->rcv_wq[i].wqe_cont.qp = qp;
1070                 qp->rcv_bufs[i] = ib_buffers.ipoib_rcv_buf[i];
1071                 tprintf("rcv_buf=%lx", qp->rcv_bufs[i]);
1072         }
1073
1074         /* init send queue WQEs list */
1075         /* set the list empty */
1076         qp->snd_wqe_alloc_idx = 0;
1077         qp->max_snd_wqes = NUM_IPOIB_SND_WQES;
1078         qp->snd_wqe_cur_free = NUM_IPOIB_SND_WQES;
1079
1080         /* set the pointer to the send WQEs buffer */
1081         qp->snd_wq = dev_buffers_p->ipoib_qp_snd_queue;
1082
1083         /* iterrate through the list */
1084         for (i = 0; i < NUM_IPOIB_SND_WQES; ++i) {
1085                 /* clear the WQE */
1086                 memset(&qp->snd_wq[i], 0, sizeof(qp->snd_wq[i]));
1087
1088                 /* update data */
1089                 qp->snd_wq[i].wqe_cont.qp = qp;
1090                 qp->snd_bufs[i] = ib_buffers.send_ipoib_buf[i];
1091                 qp->send_buf_sz = 4;
1092         }
1093
1094         /* qp number and cq numbers are already set up */
1095
1096         qp->snd_cq.cq_buf = dev_buffers_p->ipoib_snd_cq_buf;
1097         qp->rcv_cq.cq_buf = dev_buffers_p->ipoib_rcv_cq_buf;
1098         qp->snd_cq.num_cqes = NUM_IPOIB_SND_CQES;
1099         qp->rcv_cq.num_cqes = NUM_IPOIB_RCV_CQES;
1100         qp->qkey = qkey;
1101         rc = create_udqp(qp);
1102         if (!rc) {
1103                 *qp_pp = qp;
1104                 *snd_cq_pp = &qp->snd_cq;
1105                 *rcv_cq_pp = &qp->rcv_cq;
1106         }
1107
1108         return rc;
1109 }
1110
/*
 * create_udqp
 *
 * Brings a prepared UD QP online: hands both CQs to HW (SW2HW_CQ),
 * walks the QP through RST->INIT->RTR->RTS, and fills the receive
 * queue with all available buffers.  Uses goto-based cleanup: on
 * failure, CQs already given to HW are taken back.
 *
 * NOTE(review): failures after cmd_rst2init_qpee() only tear down the
 * CQs; the QP itself is not moved back to reset here -- confirm
 * whether the callers rely on that.
 *
 * Returns 0 on success, -1 on any command failure.
 */
static int create_udqp(struct udqp_st *qp)
{
	int rc, ret = 0;
	void *inprm;
	struct recv_wqe_st *rcv_wqe;

	inprm = dev_buffers_p->inprm_buf;

	/* create send CQ */
	init_cq_buf(qp->snd_cq.cq_buf, qp->snd_cq.num_cqes);
	qp->snd_cq.cons_idx = 0;
	memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
	prep_sw2hw_cq_buf(inprm, dev_ib_data.eq.eqn, qp->snd_cq.cqn,
			  qp->snd_cq.cq_buf);
	rc = cmd_sw2hw_cq(qp->snd_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	}

	/* create receive CQ */
	init_cq_buf(qp->rcv_cq.cq_buf, qp->rcv_cq.num_cqes);
	qp->rcv_cq.cons_idx = 0;
	memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
	prep_sw2hw_cq_buf(inprm, dev_ib_data.eq.eqn, qp->rcv_cq.cqn,
			  qp->rcv_cq.cq_buf);
	rc = cmd_sw2hw_cq(qp->rcv_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_snd_cq;
	}

	/* QP: reset -> init */
	memset(inprm, 0, QPCTX_IBUF_SZ);
	prep_rst2init_qpee_buf(inprm, qp->snd_cq.cqn, qp->rcv_cq.cqn, qp->qkey);
	rc = cmd_rst2init_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}

	qp->last_posted_rcv_wqe = NULL;
	qp->last_posted_snd_wqe = NULL;

	/* post all the buffers to the receive queue */
	while (1) {
		/* allocate wqe */
		rcv_wqe = alloc_rcv_wqe(qp);
		if (!rcv_wqe)
			break;

		/* post the buffer */
		rc = post_rcv_buf(qp, rcv_wqe);
		if (rc) {
			ret = -1;
			eprintf("");
			goto undo_rcv_cq;
		}
	}

	/* QP: init -> ready-to-receive */
	memset(inprm, 0, QPCTX_IBUF_SZ);
	prep_init2rtr_qpee_buf(inprm);
	rc = cmd_init2rtr_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}

	/* QP: ready-to-receive -> ready-to-send (no attributes to set) */
	memset(inprm, 0, QPCTX_IBUF_SZ);
	rc = cmd_rtr2rts_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}

	goto exit;

      undo_rcv_cq:
	rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
	if (rc)
		eprintf("");

      undo_snd_cq:
	rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
	if (rc)
		eprintf("");

      exit:
	return ret;
}
1205
1206 static int destroy_udqp(struct udqp_st *qp)
1207 {
1208         int rc;
1209
1210         rc = cmd_2err_qpee(qp->qpn);
1211         if (rc) {
1212                 eprintf("");
1213                 return rc;
1214         }
1215         tprintf("cmd_2err_qpee(0x%lx) success", qp->qpn);
1216
1217         rc = cmd_2rst_qpee(qp->qpn);
1218         if (rc) {
1219                 eprintf("");
1220                 return rc;
1221         }
1222         tprintf("cmd_2rst_qpee(0x%lx) success", qp->qpn);
1223
1224         rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
1225         if (rc) {
1226                 eprintf("");
1227                 return rc;
1228         }
1229         tprintf("cmd_hw2sw_cq(0x%lx) success", qp->snd_cq.cqn);
1230
1231         rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
1232         if (rc) {
1233                 eprintf("");
1234                 return rc;
1235         }
1236         tprintf("cmd_hw2sw_cq(0x%lx) success", qp->rcv_cq.cqn);
1237
1238         return rc;
1239 }
1240
/*
 * prep_send_wqe_buf
 *
 * Fills in a UD send WQE: control bits, UD (address vector /
 * destination) segment and the first gather pointer.  If 'buf' is
 * non-NULL its 'len' bytes are copied into the WQE's preassigned
 * bounce buffer at 'offset' and the byte count covers offset+len;
 * otherwise 'len' is used as the byte count as-is (data assumed to be
 * in place already).  The whole WQE is converted to big endian at the
 * end, ready for HW consumption.
 *
 * e - value for the 'e' (event/CQE request) control bit.
 */
static void prep_send_wqe_buf(void *qph,
			      void *avh,
			      void *wqeh,
			      const void *buf,
			      unsigned int offset, __u16 len, __u8 e)
{
	struct udqp_st *qp = qph;
	struct ud_av_st *av = avh;
	struct ud_send_wqe_st *wqe = wqeh;

	INS_FLD(e, wqe->next.control, wqe_segment_ctrl_send_st, e);
	INS_FLD(1, wqe->next.control, wqe_segment_ctrl_send_st, always1);

	/* UD segment: address vector (bus address), destination QP/keys */
	wqe->udseg.av_add_h = 0;
	wqe->udseg.av_add_l = virt_to_bus(&av->av);
	wqe->udseg.dest_qp = av->dest_qp;
	wqe->udseg.lkey = dev_ib_data.mkey;
	wqe->udseg.qkey = qp->qkey;

	if (buf) {
		/* gather entry 0 was pointed at the bounce buffer by
		   alloc_send_wqe(); copy the payload into it */
		memcpy(bus_to_virt(wqe->mpointer[0].local_addr_l) + offset, buf,
		       len);
		len += offset;
	}
	wqe->mpointer[0].byte_count = len;
	wqe->mpointer[0].lkey = dev_ib_data.mkey;

	cpu_to_be_buf(wqe, sizeof *wqe);
}
1270
1271 static void *alloc_ud_av(void)
1272 {
1273         u8 next_free;
1274
1275         if (dev_ib_data.udav.udav_next_free == FL_EOL) {
1276                 return NULL;
1277         }
1278
1279         next_free = dev_ib_data.udav.udav_next_free;
1280         dev_ib_data.udav.udav_next_free =
1281             dev_buffers_p->av_array[next_free].ud_av.next_free;
1282         tprintf("allocated udav %d", next_free);
1283         return &dev_buffers_p->av_array[next_free].ud_av;
1284 }
1285
1286 static void free_ud_av(void *avh)
1287 {
1288         union ud_av_u *avu;
1289         __u8 idx, old_idx;
1290         struct ud_av_st *av = avh;
1291
1292         avu = (union ud_av_u *)av;
1293
1294         idx = avu - dev_buffers_p->av_array;
1295         tprintf("freeing udav idx=%d", idx);
1296         old_idx = dev_ib_data.udav.udav_next_free;
1297         dev_ib_data.udav.udav_next_free = idx;
1298         avu->ud_av.next_free = old_idx;
1299 }
1300
1301 static int update_cq_cons_idx(struct cq_st *cq)
1302 {
1303         struct cq_dbell_st dbell;
1304         int rc;
1305
1306         memset(&dbell, 0, sizeof dbell);
1307         INS_FLD(cq->cqn, &dbell, tavorprm_cq_cmd_doorbell_st, cqn);
1308         INS_FLD(CQ_DBELL_CMD_INC_CONS_IDX, &dbell, tavorprm_cq_cmd_doorbell_st,
1309                 cq_cmd);
1310         rc = cmd_post_doorbell(&dbell, CQ_DBELL_OFFSET);
1311         return rc;
1312 }
1313
/*
 * poll_cq
 *
 * Polls one completion from the CQ 'cqh'.  If the entry at the
 * consumer index is SW-owned it is copied (CPU byte order) to *cqe_p,
 * the entry is handed back to HW, the consumer index is advanced and
 * *num_cqes is set to 1; otherwise *num_cqes is set to 0.
 * Returns 0 on success, -1 / command status on failure.
 */
static int poll_cq(void *cqh, union cqe_st *cqe_p, u8 * num_cqes)
{
	union cqe_st cqe;
	int rc;
	u32 *ptr;
	struct cq_st *cq = cqh;

	/* sanity check: this driver only ever uses cqns 0x80..0x83 */
	if (cq->cqn < 0x80 || cq->cqn > 0x83) {
		eprintf("");
		return -1;
	}
	ptr = (u32 *) (&(cq->cq_buf[cq->cons_idx]));
	barrier();
	/* bit 31 of the last dword clear = CQE owned by software */
	if ((ptr[7] & 0x80000000) == 0) {
		cqe = cq->cq_buf[cq->cons_idx];
		be_to_cpu_buf(&cqe, sizeof(cqe));
		*cqe_p = cqe;
		/* return the entry to HW before advancing the index */
		ptr[7] = 0x80000000;
		barrier();
		cq->cons_idx = (cq->cons_idx + 1) % cq->num_cqes;
		rc = update_cq_cons_idx(cq);
		if (rc) {
			return rc;
		}
		*num_cqes = 1;
	} else
		*num_cqes = 0;

	return 0;
}
1344
1345 static void dev2ib_cqe(struct ib_cqe_st *ib_cqe_p, union cqe_st *cqe_p)
1346 {
1347         __u8 opcode;
1348         __u32 wqe_addr_ba;
1349
1350         opcode =
1351             EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st, opcode);
1352         if (opcode >= CQE_ERROR_OPCODE)
1353                 ib_cqe_p->is_error = 1;
1354         else
1355                 ib_cqe_p->is_error = 0;
1356
1357         ib_cqe_p->is_send =
1358             EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st, s);
1359         wqe_addr_ba =
1360             EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st,
1361                    wqe_adr) << 6;
1362         ib_cqe_p->wqe = bus_to_virt(wqe_addr_ba);
1363
1364 //      if (ib_cqe_p->is_send) {
1365 //              be_to_cpu_buf(ib_cqe_p->wqe, sizeof(struct ud_send_wqe_st));
1366 //      }
1367 //      else {
1368 //              be_to_cpu_buf(ib_cqe_p->wqe, sizeof(struct recv_wqe_st));
1369 //      }
1370         ib_cqe_p->count =
1371             EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st,
1372                    byte_cnt);
1373 }
1374
/*
 * ib_poll_cq
 *
 * Wrapper around poll_cq() that translates the raw CQE into the
 * generic ib_cqe_st form.  On a completion-with-error it dumps the
 * error syndrome, QP/WQE details and a hex dump of the failing WQE.
 *
 * Returns the status of poll_cq(); *num_cqes is 0 or 1.
 */
static int ib_poll_cq(void *cqh, struct ib_cqe_st *ib_cqe_p, u8 * num_cqes)
{
	int rc;
	union cqe_st cqe;
	struct cq_st *cq = cqh;
	__u8 opcode;

	rc = poll_cq(cq, &cqe, num_cqes);
	if (rc || ((*num_cqes) == 0)) {
		return rc;
	}

	dev2ib_cqe(ib_cqe_p, &cqe);

	opcode =
	    EX_FLD(cqe.good_cqe, tavorprm_completion_queue_entry_st, opcode);
	if (opcode >= CQE_ERROR_OPCODE) {
		/* error completion: reinterpret the CQE via the error
		   layout and dump everything for debugging */
		struct ud_send_wqe_st *wqe_p, wqe;
		__u32 *ptr;
		unsigned int i;

		/* wqe_addr is in 64-byte units */
		wqe_p =
		    bus_to_virt(EX_FLD
				(cqe.error_cqe,
				 tavorprm_completion_with_error_st,
				 wqe_addr) << 6);
		eprintf("syndrome=0x%lx",
			EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
			       syndrome));
		eprintf("wqe_addr=0x%lx", wqe_p);
		eprintf("wqe_size=0x%lx",
			EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
			       wqe_size));
		eprintf("myqpn=0x%lx",
			EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
			       myqpn));
		eprintf("db_cnt=0x%lx",
			EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
			       db_cnt));
		/* copy + swap so the dump is in CPU byte order */
		memcpy(&wqe, wqe_p, sizeof wqe);
		be_to_cpu_buf(&wqe, sizeof wqe);

		eprintf("dumping wqe...");
		ptr = (__u32 *) (&wqe);
		for (i = 0; i < sizeof wqe; i += 4) {
			printf("%lx : ", ptr[i >> 2]);
		}

	}

	return rc;
}
1427
/* always work on ipoib qp */
/*
 * add_qp_to_mcast_group
 *
 * Adds (add != 0) or removes (add == 0) the IPoIB QP from the
 * multicast group 'mcast_gid': hashes the GID, builds an MGM entry
 * with this QP in member slot 0, and writes it with cmd_write_mgm().
 * Returns the status of the last command issued.
 *
 * NOTE(review): the whole MGM entry is rebuilt from scratch, so any
 * other QPs in the group's entry would be dropped -- acceptable here
 * since only the IPoIB QP is ever used.
 */
static int add_qp_to_mcast_group(union ib_gid_u mcast_gid, __u8 add)
{
	void *mg;
	__u8 *tmp;
	int rc;
	__u16 mgid_hash;
	void *mgmqp_p;

	/* MGID_HASH expects the GID in CPU byte order */
	tmp = dev_buffers_p->inprm_buf;
	memcpy(tmp, mcast_gid.raw, 16);
	be_to_cpu_buf(tmp, 16);
	rc = cmd_mgid_hash(tmp, &mgid_hash);
	if (!rc) {
		mg = (void *)dev_buffers_p->inprm_buf;
		memset(mg, 0, MT_STRUCT_SIZE(tavorprm_mgm_entry_st));
		INS_FLD(mcast_gid.as_u32.dw[0], mg, tavorprm_mgm_entry_st, mgid_128_96);	// memcpy(&mg->mgid_128_96, &mcast_gid.raw[0], 4);
		INS_FLD(mcast_gid.as_u32.dw[1], mg, tavorprm_mgm_entry_st, mgid_95_64);	// memcpy(&mg->mgid_95_64, &mcast_gid.raw[4], 4);
		INS_FLD(mcast_gid.as_u32.dw[2], mg, tavorprm_mgm_entry_st, mgid_63_32);	//memcpy(&mg->mgid_63_32, &mcast_gid.raw[8], 4);
		INS_FLD(mcast_gid.as_u32.dw[3], mg, tavorprm_mgm_entry_st, mgid_31_0);	//memcpy(&mg->mgid_31_0, &mcast_gid.raw[12], 4);
		be_to_cpu_buf(mg + MT_BYTE_OFFSET(tavorprm_mgm_entry_st, mgid_128_96), 16);	//be_to_cpu_buf(&mg->mgid_128_96, 16);
		mgmqp_p = mg + MT_BYTE_OFFSET(tavorprm_mgm_entry_st, mgmqp_0);
		INS_FLD(dev_ib_data.ipoib_qp.qpn, mgmqp_p, tavorprm_mgmqp_st, qpn_i);	//mg->mgmqp[0].qpn = dev_ib_data.ipoib_qp.qpn;
		INS_FLD(add, mgmqp_p, tavorprm_mgmqp_st, qi);	//mg->mgmqp[0].valid = add ? 1 : 0;
		rc = cmd_write_mgm(mg, mgid_hash);
	}
	return rc;
}
1456
1457 static int clear_interrupt(void)
1458 {
1459         __u32 ecr;
1460         int ret = 0;
1461
1462         if (gw_read_cr(0x80704, &ecr)) {
1463                 eprintf("");
1464         } else {
1465                 if (ecr) {
1466                         ret = 1;
1467                 }
1468         }
1469         gw_write_cr(0xf00d8, 0x80000000);       /* clear int */
1470         gw_write_cr(0x8070c, 0xffffffff);
1471
1472         return ret;
1473 }
1474
1475 static struct ud_send_wqe_st *alloc_send_wqe(udqp_t qph)
1476 {
1477         struct udqp_st *qp = qph;
1478         __u8 new_entry;
1479         struct ud_send_wqe_st *wqe;
1480
1481         if (qp->snd_wqe_cur_free == 0) {
1482                 return NULL;
1483         }
1484         new_entry = qp->snd_wqe_alloc_idx;
1485
1486         wqe = &qp->snd_wq[new_entry].wqe;
1487         qp->snd_wqe_cur_free--;
1488         qp->snd_wqe_alloc_idx = (qp->snd_wqe_alloc_idx + 1) % qp->max_snd_wqes;
1489
1490         memset(wqe, 0, sizeof *wqe);
1491
1492         wqe->mpointer[0].local_addr_l = virt_to_bus(qp->snd_bufs[new_entry]);
1493
1494         return wqe;
1495 }
1496
1497 /*
1498  *  alloc_rcv_wqe
1499  *
1500  *  Note: since we work directly on the work queue, wqes
1501  *        are left in big endian
1502  */
1503 static struct recv_wqe_st *alloc_rcv_wqe(struct udqp_st *qp)
1504 {
1505         __u8 new_entry;
1506         struct recv_wqe_st *wqe;
1507
1508         if (qp->recv_wqe_cur_free == 0) {
1509                 return NULL;
1510         }
1511
1512         new_entry = qp->recv_wqe_alloc_idx;
1513         wqe = &qp->rcv_wq[new_entry].wqe;
1514
1515         qp->recv_wqe_cur_free--;
1516         qp->recv_wqe_alloc_idx =
1517             (qp->recv_wqe_alloc_idx + 1) % qp->max_recv_wqes;
1518
1519         memset(wqe, 0, sizeof *wqe);
1520
1521         /* GRH is always required */
1522         wqe->mpointer[0].local_addr_h = 0;
1523         wqe->mpointer[0].local_addr_l = virt_to_bus(qp->rcv_bufs[new_entry]);
1524         wqe->mpointer[0].lkey = dev_ib_data.mkey;
1525         wqe->mpointer[0].byte_count = GRH_SIZE;
1526
1527         wqe->mpointer[1].local_addr_h = 0;
1528         wqe->mpointer[1].local_addr_l =
1529             virt_to_bus(qp->rcv_bufs[new_entry] + GRH_SIZE);
1530         wqe->mpointer[1].lkey = dev_ib_data.mkey;
1531         wqe->mpointer[1].byte_count = qp->rcv_buf_sz;
1532
1533         tprintf("rcv_buf=%lx\n", qp->rcv_bufs[new_entry]);
1534
1535         /* we do it only on the data segment since the control
1536            segment is always owned by HW */
1537         cpu_to_be_buf(wqe, sizeof *wqe);
1538
1539 //      tprintf("alloc wqe= 0x%x", wqe);
1540         return wqe;
1541 }
1542
1543 static int free_send_wqe(struct ud_send_wqe_st *wqe)
1544 {
1545         union ud_send_wqe_u *wqe_u;
1546         struct udqp_st *qp;
1547
1548         wqe_u = (union ud_send_wqe_u *)wqe;
1549         qp = wqe_u->wqe_cont.qp;
1550
1551         if (qp->snd_wqe_cur_free >= qp->max_snd_wqes) {
1552                 return -1;
1553         }
1554
1555         qp->snd_wqe_cur_free++;
1556
1557         return 0;
1558 }
1559
1560 static int free_rcv_wqe(struct recv_wqe_st *wqe)
1561 {
1562         union recv_wqe_u *wqe_u;
1563         struct udqp_st *qp;
1564
1565         wqe_u = (union recv_wqe_u *)wqe;
1566         qp = wqe_u->wqe_cont.qp;
1567
1568         if (qp->recv_wqe_cur_free >= qp->max_recv_wqes) {
1569                 return -1;
1570         }
1571
1572         qp->recv_wqe_cur_free++;
1573
1574         return 0;
1575 }
1576
1577 static int free_wqe(void *wqe)
1578 {
1579         int rc = 0;
1580         struct recv_wqe_st *rcv_wqe;
1581
1582 //      tprintf("free wqe= 0x%x", wqe);
1583         if ((wqe >= (void *)(dev_ib_data.ipoib_qp.rcv_wq)) &&
1584             (wqe <
1585              (void *)(&dev_ib_data.ipoib_qp.rcv_wq[NUM_IPOIB_RCV_WQES]))) {
1586                 /* ipoib receive wqe */
1587                 free_rcv_wqe(wqe);
1588                 rcv_wqe = alloc_rcv_wqe(&dev_ib_data.ipoib_qp);
1589                 if (rcv_wqe) {
1590                         rc = post_rcv_buf(&dev_ib_data.ipoib_qp, rcv_wqe);
1591                         if (rc) {
1592                                 eprintf("");
1593                         }
1594                 }
1595         } else if (wqe >= (void *)(dev_ib_data.ipoib_qp.snd_wq) &&
1596                    wqe <
1597                    (void *)(&dev_ib_data.ipoib_qp.snd_wq[NUM_IPOIB_SND_WQES])) {
1598                 /* ipoib send wqe */
1599                 free_send_wqe(wqe);
1600         } else if (wqe >= (void *)(dev_ib_data.mads_qp.rcv_wq) &&
1601                    wqe <
1602                    (void *)(&dev_ib_data.mads_qp.rcv_wq[NUM_MADS_RCV_WQES])) {
1603                 /* mads receive wqe */
1604                 free_rcv_wqe(wqe);
1605                 rcv_wqe = alloc_rcv_wqe(&dev_ib_data.mads_qp);
1606                 if (rcv_wqe) {
1607                         rc = post_rcv_buf(&dev_ib_data.mads_qp, rcv_wqe);
1608                         if (rc) {
1609                                 eprintf("");
1610                         }
1611                 }
1612         } else if (wqe >= (void *)(dev_ib_data.mads_qp.snd_wq) &&
1613                    wqe <
1614                    (void *)(&dev_ib_data.mads_qp.snd_wq[NUM_MADS_SND_WQES])) {
1615                 /* mads send wqe */
1616                 free_send_wqe(wqe);
1617         } else {
1618                 rc = -1;
1619                 eprintf("");
1620         }
1621
1622         return rc;
1623 }
1624
1625 static int update_eq_cons_idx(struct eq_st *eq)
1626 {
1627         struct eq_dbell_st dbell;
1628         int rc;
1629
1630         memset(&dbell, 0, sizeof dbell);
1631         INS_FLD(dev_ib_data.eq.eqn, &dbell, tavorprm_eq_cmd_doorbell_st, eqn);
1632         INS_FLD(EQ_DBELL_CMD_SET_CONS_IDX, &dbell, tavorprm_eq_cmd_doorbell_st,
1633                 eq_cmd);
1634         INS_FLD(eq->cons_idx, &dbell, tavorprm_eq_cmd_doorbell_st, eq_param);
1635         rc = cmd_post_doorbell(&dbell, EQ_DBELL_OFFSET);
1636
1637         return rc;
1638 }
1639
1640 static void dev2ib_eqe(struct ib_eqe_st *ib_eqe_p, void *eqe_p)
1641 {
1642         void *tmp;
1643
1644         ib_eqe_p->event_type =
1645             EX_FLD(eqe_p, tavorprm_event_queue_entry_st, event_type);
1646
1647         tmp = eqe_p + MT_BYTE_OFFSET(tavorprm_event_queue_entry_st, event_data);
1648         ib_eqe_p->cqn = EX_FLD(tmp, tavorprm_completion_event_st, cqn);
1649 }
1650
/*
 * poll_eq
 *
 * Polls the (single) event queue.  If the entry at the consumer index
 * is SW-owned it is copied out, translated into *ib_eqe_p, handed
 * back to HW and the consumer index is advanced; *num_eqes reports
 * 0 or 1.  Returns 0 on success, -1 if the consumer-index doorbell
 * failed.
 */
static int poll_eq(struct ib_eqe_st *ib_eqe_p, __u8 * num_eqes)
{
	struct eqe_t eqe;
	__u8 owner;
	int rc;
	__u32 *ptr;
	struct eq_st *eq = &dev_ib_data.eq;

	ptr = (__u32 *) (&(eq->eq_buf[eq->cons_idx]));
	tprintf("cons)idx=%d, addr(eqe)=%x, val=0x%x", eq->cons_idx, virt_to_bus(ptr), ptr[7]);
	/* bit 31 of the last dword set = HW still owns the entry */
	owner = (ptr[7] & 0x80000000) ? OWNER_HW : OWNER_SW;
	if (owner == OWNER_SW) {
	tprintf("got eqe");
		eqe = eq->eq_buf[eq->cons_idx];
		be_to_cpu_buf(&eqe, sizeof(eqe));
		dev2ib_eqe(ib_eqe_p, &eqe);
		/* hand the entry back to HW */
		ptr[7] |= 0x80000000;
		/* NOTE(review): this stores the byte-swapped (CPU-order) copy
		   over the live entry and overwrites the owner bit set just
		   above -- looks suspicious; confirm against the Tavor PRM /
		   working hardware before changing */
		eq->eq_buf[eq->cons_idx] = eqe;
		eq->cons_idx = (eq->cons_idx + 1) % eq->eq_size;
		rc = update_eq_cons_idx(eq);
		if (rc) {
			return -1;
		}
		*num_eqes = 1;
	} else {
		*num_eqes = 0;
	}
	return 0;
}
1680
1681 static int ib_device_close(void)
1682 {
1683         iounmap(tavor_pci_dev.uar);
1684         iounmap(tavor_pci_dev.cr_space);
1685         iounmap(dev_ib_data.error_buf_addr);
1686         return 0;
1687 }
1688
1689 static __u32 dev_get_qpn(void *qph)
1690 {
1691         struct udqp_st *qp = qph;
1692
1693         return qp->qpn;
1694 }
1695
/*
 * dev_post_dbell
 *
 * Writes an 8-byte doorbell record to the UAR at 'offset'.  The two
 * dwords are converted to big endian and written in order with a
 * barrier in between -- NOTE(review): presumably HW latches the
 * doorbell on the second write, so the MMIO write order must be
 * preserved; confirm against the PRM.
 */
static void dev_post_dbell(void *dbell, __u32 offset)
{
	__u32 *ptr;
	unsigned long address;

	ptr = dbell;
	tprintf("ptr[0]= 0x%lx", ptr[0]);
	tprintf("ptr[1]= 0x%lx", ptr[1]);
	address = (unsigned long)(tavor_pci_dev.uar) + offset;
	tprintf("va=0x%lx pa=0x%lx", address,
		virt_to_bus((const void *)address));
	writel(htonl(ptr[0]), tavor_pci_dev.uar + offset);
	barrier();
	address += 4;
	tprintf("va=0x%lx pa=0x%lx", address,
		virt_to_bus((const void *)address));
	writel(htonl(ptr[1]), tavor_pci_dev.uar + offset + 4);
}
1714
1715