[image] Allow for zero embedded images
[people/asdlkf/gpxe.git] / src / drivers / net / mlx_ipoib / ib_mt23108.c
1 /*
2   This software is available to you under a choice of one of two
3   licenses.  You may choose to be licensed under the terms of the GNU
4   General Public License (GPL) Version 2, available at
5   <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
6   license, available in the LICENSE.TXT file accompanying this
7   software.  These details are also available at
8   <http://openib.org/license.html>.
9
10   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17   SOFTWARE.
18
19   Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
20 */
21
22 #include "mt23108.h"
23 #include "ib_driver.h"
24 #include <gpxe/pci.h>
25
/* All DMA-visible buffers the driver ever hands to the HCA, gathered
 * into a single structure so that one memory region (MPT entry) can
 * cover them all (see prep_sw2hw_mpt_buf()).  Each member carries the
 * alignment attribute the hardware requires for that object type, and
 * the structure is packed so no compiler padding escapes the region. */
struct device_buffers_st {
	/* receive WQE rings for the MAD and IPoIB queue pairs */
	union recv_wqe_u mads_qp_rcv_queue[NUM_MADS_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	union recv_wqe_u ipoib_qp_rcv_queue[NUM_IPOIB_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	/* UD send WQE rings for the same two QPs */
	union ud_send_wqe_u mads_qp_snd_queue[NUM_MADS_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	union ud_send_wqe_u ipoib_qp_snd_queue[NUM_IPOIB_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	/* shared input/output mailboxes for firmware commands */
	u8 inprm_buf[INPRM_BUF_SZ] __attribute__ ((aligned(INPRM_BUF_ALIGN)));
	u8 outprm_buf[OUTPRM_BUF_SZ]
	    __attribute__ ((aligned(OUTPRM_BUF_ALIGN)));
	/* the single event queue ring (see setup_hca()) */
	struct eqe_t eq_buf[1 << LOG2_EQ_SZ]
	    __attribute__ ((aligned(sizeof(struct eqe_t))));
	/* completion queue rings: send/receive for each of the two QPs */
	union cqe_st mads_snd_cq_buf[NUM_MADS_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_snd_cq_buf[NUM_IPOIB_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st mads_rcv_cq_buf[NUM_MADS_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_rcv_cq_buf[NUM_IPOIB_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	/* pool of UD address vectors, threaded into a free list by
	   init_av_array() */
	union ud_av_u av_array[NUM_AVS]
	    __attribute__ ((aligned(ADDRESS_VECTOR_ST_ALIGN)));
} __attribute__ ((packed));
51
/* Alignment demanded for the start of the device buffers; src_buf is
   over-allocated by STRUCT_ALIGN_SZ-1 so an aligned start always fits. */
#define STRUCT_ALIGN_SZ 4096
#define SRC_BUF_SZ (sizeof(struct device_buffers_st) + STRUCT_ALIGN_SZ - 1)

/* the following must be kept in this order
   for the memory region to cover the buffers:
   init_dev_data() computes memreg_size as the address distance from
   the aligned start of src_buf up to &memreg_size itself, so both
   src_buf and ib_buffers must land between the two symbols.
   NOTE(review): this relies on the linker emitting these statics
   contiguously in declaration order -- not guaranteed by the C
   standard, though it holds for the gPXE build; confirm if the
   toolchain changes. */
static u8 src_buf[SRC_BUF_SZ];		/* raw (unaligned) buffer storage */
static struct ib_buffers_st ib_buffers;	/* data buffers inside the region */
static __u32 memreg_size;		/* registered region size, bytes */
/* end of order constraint */

static struct dev_pci_struct tavor_pci_dev;	/* HCA + bridge PCI state */
static struct device_buffers_st *dev_buffers_p;	/* aligned view into src_buf */
static struct device_ib_data_st dev_ib_data;	/* global IB driver state */
66 static int gw_write_cr(__u32 addr, __u32 data)
67 {
68         writel(htonl(data), tavor_pci_dev.cr_space + addr);
69         return 0;
70 }
71
72 static int gw_read_cr(__u32 addr, __u32 * result)
73 {
74         *result = ntohl(readl(tavor_pci_dev.cr_space + addr));
75         return 0;
76 }
77
78 static int reset_hca(void)
79 {
80         return gw_write_cr(TAVOR_RESET_OFFSET, 1);
81 }
82
83 static int find_mlx_bridge(__u8 hca_bus, __u8 * br_bus_p, __u8 * br_devfn_p)
84 {
85         int bus;
86         int dev;
87         int devfn;
88         int rc;
89         __u16 vendor, dev_id;
90         __u8 sec_bus;
91
92         for (bus = 0; bus < 256; ++bus) {
93                 for (dev = 0; dev < 32; ++dev) {
94                         devfn = (dev << 3);
95                         rc = pcibios_read_config_word(bus, devfn, PCI_VENDOR_ID,
96                                                       &vendor);
97                         if (rc)
98                                 return rc;
99
100                         if (vendor != MELLANOX_VENDOR_ID)
101                                 continue;
102
103                         rc = pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID,
104                                                       &dev_id);
105                         if (rc)
106                                 return rc;
107
108                         if (dev_id != TAVOR_BRIDGE_DEVICE_ID)
109                                 continue;
110
111                         rc = pcibios_read_config_byte(bus, devfn,
112                                                       PCI_SECONDARY_BUS,
113                                                       &sec_bus);
114                         if (rc)
115                                 return rc;
116
117                         if (sec_bus == hca_bus) {
118                                 *br_bus_p = bus;
119                                 *br_devfn_p = devfn;
120                                 return 0;
121                         }
122                 }
123         }
124
125         return -1;
126 }
127
/* One-time PCI-level initialization of the HCA: record the BARs,
 * snapshot the device's PCI configuration header (and, for a plain
 * Tavor, the companion bridge's header too, via find_mlx_bridge()),
 * then map the CR-space and UAR windows.  The saved config space is
 * replayed by restore_config() after the HCA reset wipes it.
 * Returns 0 on success, a PCI error code or -1 on failure. */
static int ib_device_init(struct pci_device *dev)
{
	int i;
	int rc;
	__u8 br_bus, br_devfn;

	tprintf("");

	memset(&dev_ib_data, 0, sizeof dev_ib_data);

	/* save bars */
	tprintf("bus=%d devfn=0x%x", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		tavor_pci_dev.dev.bar[i] =
		    pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		tprintf("bar[%d]= 0x%08lx", i, tavor_pci_dev.dev.bar[i]);
	}

	tprintf("");
	/* save config space (64 dwords = the full 256-byte header) */
	for (i = 0; i < 64; ++i) {
		rc = pci_read_config_dword(dev, i << 2,
					   &tavor_pci_dev.dev.
					   dev_config_space[i]);
		if (rc) {
			eprintf("");
			return rc;
		}
		tprintf("config[%d]= 0x%08lx", i << 2,
			tavor_pci_dev.dev.dev_config_space[i]);
	}

	tprintf("");
	tavor_pci_dev.dev.dev = dev;

	tprintf("");
	/* a plain Tavor sits behind its own PCI bridge whose config
	   space must also be saved and later restored */
	if (dev->dev_id == TAVOR_DEVICE_ID) {

		rc = find_mlx_bridge(dev->bus, &br_bus, &br_devfn);
		if (rc) {
			eprintf("");
			return rc;
		}

		tavor_pci_dev.br.bus = br_bus;
		tavor_pci_dev.br.devfn = br_devfn;

		tprintf("bus=%d devfn=0x%x", br_bus, br_devfn);
		/* save the bridge's config space as well */
		for (i = 0; i < 64; ++i) {
			rc = pcibios_read_config_dword(br_bus, br_devfn, i << 2,
						       &tavor_pci_dev.br.
						       dev_config_space[i]);
			if (rc) {
				eprintf("");
				return rc;
			}
			tprintf("config[%d]= 0x%08lx", i << 2,
				tavor_pci_dev.br.dev_config_space[i]);
		}
	}

	tprintf("");

	/* map cr-space (1MB window at BAR0) */
	tavor_pci_dev.cr_space = ioremap(tavor_pci_dev.dev.bar[0], 0x100000);
	if (!tavor_pci_dev.cr_space) {
		eprintf("");
		return -1;
	}

	/* map uar: one 4KB page at index UAR_IDX inside BAR2 */
	tavor_pci_dev.uar =
	    ioremap(tavor_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, 0x1000);
	if (!tavor_pci_dev.uar) {
		eprintf("");
		return -1;
	}
	tprintf("uar_base (pa:va) = 0x%lx 0x%lx",
		tavor_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, tavor_pci_dev.uar);

	tprintf("");

	return 0;
}
213
/* Round 'buf' up to the next multiple of 'align'.
 * 'align' must be a power of two; a value already aligned is
 * returned unchanged. */
static inline unsigned long lalign(unsigned long buf, unsigned long align)
{
	unsigned long mask = align - 1;

	return (buf + mask) & ~mask;
}
219
/* Compute the aligned start of the device buffers inside src_buf and
 * the size of the single memory region that must cover every driver
 * buffer.  Always returns 0.
 *
 * NOTE(review): memreg_size is the address distance from dev_buffers_p
 * up to the memreg_size variable itself, which assumes src_buf,
 * ib_buffers and memreg_size are laid out contiguously in declaration
 * order (see the "order constraint" comment at their definitions).
 * The __u32 pointer casts also assume a 32-bit address space --
 * true for the gPXE environment, but confirm before reusing on a
 * 64-bit build. */
static int init_dev_data(void)
{
	unsigned long tmp;

	/* bus address of the aligned buffer start */
	tmp = lalign(virt_to_bus(src_buf), STRUCT_ALIGN_SZ);

	dev_buffers_p = bus_to_virt(tmp);
	memreg_size = (__u32) (&memreg_size) - (__u32) dev_buffers_p;
	tprintf("src_buf=0x%lx, dev_buffers_p=0x%lx, memreg_size=0x%x", src_buf,
		dev_buffers_p, memreg_size);

	return 0;
}
233
234 static int restore_config(void)
235 {
236         int i;
237         int rc;
238
239         if (tavor_pci_dev.dev.dev->dev_id == TAVOR_DEVICE_ID) {
240                 for (i = 0; i < 64; ++i) {
241                         rc = pcibios_write_config_dword(tavor_pci_dev.br.bus,
242                                                         tavor_pci_dev.br.devfn,
243                                                         i << 2,
244                                                         tavor_pci_dev.br.
245                                                         dev_config_space[i]);
246                         if (rc) {
247                                 return rc;
248                         }
249                 }
250         }
251
252         for (i = 0; i < 64; ++i) {
253                 if (i != 22 && i != 23) {
254                         rc = pci_write_config_dword(tavor_pci_dev.dev.dev,
255                                                     i << 2,
256                                                     tavor_pci_dev.dev.
257                                                     dev_config_space[i]);
258                         if (rc) {
259                                 return rc;
260                         }
261                 }
262         }
263         return 0;
264 }
265
/* Translate the gathered init_hca_st parameters into the wire-format
 * INIT_HCA mailbox expected by the Tavor firmware.  The low halves of
 * the context-table base addresses are right-shifted because the PRM
 * fields hold only the top MT_BIT_SIZE bits of the 32-bit value (the
 * tables are naturally aligned, so the dropped low bits are zero). */
static void prep_init_hca_buf(const struct init_hca_st *init_hca_p, void *buf)
{
	/*struct init_hca_param_st */ void *p = buf;
	void *tmp;

	memset(buf, 0, MT_STRUCT_SIZE(tavorprm_init_hca_st));

	/* QPC/EEC/CQC/EQC/RDB parameter section */
	tmp =
	    p + MT_BYTE_OFFSET(tavorprm_init_hca_st,
			       qpc_eec_cqc_eqc_rdb_parameters);

	INS_FLD(init_hca_p->qpc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		qpc_base_addr_h);
	INS_FLD(init_hca_p->
		qpc_base_addr_l >> (32 -
				    (MT_BIT_SIZE
				     (tavorprm_qpcbaseaddr_st,
				      qpc_base_addr_l))), tmp,
		tavorprm_qpcbaseaddr_st, qpc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_qp, tmp, tavorprm_qpcbaseaddr_st,
		log_num_of_qp);

	INS_FLD(init_hca_p->cqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		cqc_base_addr_h);
	INS_FLD(init_hca_p->
		cqc_base_addr_l >> (32 -
				    (MT_BIT_SIZE
				     (tavorprm_qpcbaseaddr_st,
				      cqc_base_addr_l))), tmp,
		tavorprm_qpcbaseaddr_st, cqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_cq, tmp, tavorprm_qpcbaseaddr_st,
		log_num_of_cq);

	INS_FLD(init_hca_p->eqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		eqc_base_addr_h);
	INS_FLD(init_hca_p->
		eqc_base_addr_l >> (32 -
				    (MT_BIT_SIZE
				     (tavorprm_qpcbaseaddr_st,
				      eqc_base_addr_l))), tmp,
		tavorprm_qpcbaseaddr_st, eqc_base_addr_l);
	/* number of EQs comes from the driver constant, not from the
	   caller's struct */
	INS_FLD(LOG2_EQS, tmp, tavorprm_qpcbaseaddr_st, log_num_eq);

	INS_FLD(init_hca_p->srqc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		srqc_base_addr_h);
	INS_FLD(init_hca_p->
		srqc_base_addr_l >> (32 -
				     (MT_BIT_SIZE
				      (tavorprm_qpcbaseaddr_st,
				       srqc_base_addr_l))), tmp,
		tavorprm_qpcbaseaddr_st, srqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_srq, tmp, tavorprm_qpcbaseaddr_st,
		log_num_of_srq);

	/* extended QP/EE context tables take full 32-bit low addresses */
	INS_FLD(init_hca_p->eqpc_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		eqpc_base_addr_h);
	INS_FLD(init_hca_p->eqpc_base_addr_l, tmp, tavorprm_qpcbaseaddr_st,
		eqpc_base_addr_l);

	INS_FLD(init_hca_p->eeec_base_addr_h, tmp, tavorprm_qpcbaseaddr_st,
		eeec_base_addr_h);
	INS_FLD(init_hca_p->eeec_base_addr_l, tmp, tavorprm_qpcbaseaddr_st,
		eeec_base_addr_l);

	/* multicast table parameters */
	tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, multicast_parameters);

	INS_FLD(init_hca_p->mc_base_addr_h, tmp, tavorprm_multicastparam_st,
		mc_base_addr_h);
	INS_FLD(init_hca_p->mc_base_addr_l, tmp, tavorprm_multicastparam_st,
		mc_base_addr_l);

	INS_FLD(init_hca_p->log_mc_table_entry_sz, tmp,
		tavorprm_multicastparam_st, log_mc_table_entry_sz);
	INS_FLD(init_hca_p->log_mc_table_sz, tmp, tavorprm_multicastparam_st,
		log_mc_table_sz);
	INS_FLD(init_hca_p->mc_table_hash_sz, tmp, tavorprm_multicastparam_st,
		mc_table_hash_sz);

	/* translation/protection table (MPT/MTT) parameters */
	tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, tpt_parameters);

	INS_FLD(init_hca_p->mpt_base_addr_h, tmp, tavorprm_tptparams_st,
		mpt_base_adr_h);
	INS_FLD(init_hca_p->mpt_base_addr_l, tmp, tavorprm_tptparams_st,
		mpt_base_adr_l);
	INS_FLD(init_hca_p->log_mpt_sz, tmp, tavorprm_tptparams_st, log_mpt_sz);

	INS_FLD(init_hca_p->mtt_base_addr_h, tmp, tavorprm_tptparams_st,
		mtt_base_addr_h);
	INS_FLD(init_hca_p->mtt_base_addr_l, tmp, tavorprm_tptparams_st,
		mtt_base_addr_l);

	/* UAR parameters: taken straight from the PCI BARs rather than
	   from init_hca_p */
	tmp = p + MT_BYTE_OFFSET(tavorprm_init_hca_st, uar_parameters);
	INS_FLD(tavor_pci_dev.dev.bar[3], tmp, tavorprm_uar_params_st,
		uar_base_addr_h);
	INS_FLD(tavor_pci_dev.dev.bar[2] & 0xfff00000, tmp,
		tavorprm_uar_params_st, uar_base_addr_l);

}
364
/* Fill the SW2HW_MPT mailbox describing the driver's single memory
 * region: enabled for local read/write, physically addressed (pa set,
 * so no MTT translation), starting at dev_buffers_p and spanning
 * memreg_size bytes under the given key.  The caller must have zeroed
 * 'buf' beforehand (setup_hca() does).
 * NOTE(review): flag semantics (m_io, r_w) inferred from field names;
 * confirm against the Tavor PRM MPT entry layout. */
static void prep_sw2hw_mpt_buf(void *buf, __u32 mkey)
{
	INS_FLD(1, buf, tavorprm_mpt_st, m_io);
	INS_FLD(1, buf, tavorprm_mpt_st, lw);
	INS_FLD(1, buf, tavorprm_mpt_st, lr);
	INS_FLD(1, buf, tavorprm_mpt_st, pa);
	INS_FLD(1, buf, tavorprm_mpt_st, r_w);

	INS_FLD(mkey, buf, tavorprm_mpt_st, mem_key);
	INS_FLD(GLOBAL_PD, buf, tavorprm_mpt_st, pd);

	/* region covers all driver buffers -- see device_buffers_st
	   and init_dev_data() for how memreg_size is derived */
	INS_FLD(virt_to_bus(dev_buffers_p), buf, tavorprm_mpt_st,
		start_address_l);
	INS_FLD(memreg_size, buf, tavorprm_mpt_st, reg_wnd_len_l);
}
380
/* Fill the SW2HW_EQ mailbox for the driver's single event queue:
 * state "fired" (2), ring at 'eq', LOG2_EQ_SZ entries, doorbells via
 * UAR_IDX, protected by the global PD and the driver's memory key. */
static void prep_sw2hw_eq_buf(void *buf, struct eqe_t *eq)
{
	memset(buf, 0, MT_STRUCT_SIZE(tavorprm_eqc_st));

	INS_FLD(2, buf, tavorprm_eqc_st, st); /* fired */
	INS_FLD(virt_to_bus(eq), buf, tavorprm_eqc_st, start_address_l);
	INS_FLD(LOG2_EQ_SZ, buf, tavorprm_eqc_st, log_eq_size);
	INS_FLD(UAR_IDX, buf, tavorprm_eqc_st, usr_page);
	INS_FLD(GLOBAL_PD, buf, tavorprm_eqc_st, pd);
	INS_FLD(dev_ib_data.mkey, buf, tavorprm_eqc_st, lkey);
}
392
393 static void init_eq_buf(void *eq_buf)
394 {
395         int num_eqes = 1 << LOG2_EQ_SZ;
396
397         memset(eq_buf, 0xff, num_eqes * sizeof(struct eqe_t));
398 }
399
/* Build the INIT_IB mailbox contents as raw dwords (the caller zeroes
 * the buffer first -- see setup_hca()).
 * NOTE(review): the meanings of these magic values are inferred from
 * position only:
 *   ptr[0] = 0x4310 -- presumably the port capability/VL/MTU word,
 *   ptr[1] = 1      -- presumably max_gid,
 *   ptr[2] = 64     -- presumably max_pkey;
 * decode against the tavorprm INIT_IB layout before changing. */
static void prep_init_ib_buf(void *buf)
{
	__u32 *ptr = (__u32 *) buf;

	ptr[0] = 0x4310;
	ptr[1] = 1;
	ptr[2] = 64;
}
408
/* Build the SW2HW_CQ mailbox as raw dwords (caller zeroes the buffer).
 * NOTE(review): word meanings inferred from position --
 *   ptr[2]    : CQ ring bus address (low 32 bits)
 *   ptr[3]    : log2 CQ size in the top byte, UAR index below
 *   ptr[4..5] : EQ number for completions and for errors (same EQ here)
 *   ptr[6..7] : protection domain and memory key
 *   ptr[12]   : CQ number
 * -- confirm against the Tavor PRM CQ context layout before editing. */
static void prep_sw2hw_cq_buf(void *buf, __u8 eqn, __u32 cqn,
			      union cqe_st *cq_buf)
{
	__u32 *ptr = (__u32 *) buf;

	ptr[2] = virt_to_bus(cq_buf);
	ptr[3] = (LOG2_CQ_SZ << 24) | UAR_IDX;
	ptr[4] = eqn;
	ptr[5] = eqn;
	ptr[6] = dev_ib_data.pd;
	ptr[7] = dev_ib_data.mkey;
	ptr[12] = cqn;
}
422
/* Build the RST2INIT transition mailbox for a UD QP: service type UD,
 * doorbells via UAR_IDX, the driver's PD/lkey, CQE generation enabled
 * on both send and receive, the given send/receive CQNs and Q_Key,
 * and the driver's port in the primary address path.  MTU code 4 and
 * msg_max 0xb correspond to 2048-byte messages (IB MTU encoding). */
static void prep_rst2init_qpee_buf(void *buf, __u32 snd_cqn, __u32 rcv_cqn,
				   __u32 qkey)
{
	struct qp_ee_state_tarnisition_st *prm;
	void *tmp;

	prm = (struct qp_ee_state_tarnisition_st *)buf;

	INS_FLD(3, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, st);	/* service type = UD */
	INS_FLD(3, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, pm_state);	/* required for UD QP */
	INS_FLD(UAR_IDX, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		usr_page);
	INS_FLD(dev_ib_data.pd, &prm->ctx,
		tavorprm_queue_pair_ee_context_entry_st, pd);
	INS_FLD(dev_ib_data.mkey, &prm->ctx,
		tavorprm_queue_pair_ee_context_entry_st, wqe_lkey);
	INS_FLD(1, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, ssc);	/* generate send CQE */
	INS_FLD(1, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, rsc);	/* generate receive CQE */
	INS_FLD(snd_cqn, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		cqn_snd);
	INS_FLD(rcv_cqn, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		cqn_rcv);
	INS_FLD(qkey, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		q_key);

	/* port number lives inside the embedded primary address path */
	tmp =
	    (void *)(&prm->ctx) +
	    MT_BYTE_OFFSET(tavorprm_queue_pair_ee_context_entry_st,
			   primary_address_path);
	INS_FLD(dev_ib_data.port, tmp, tavorprm_address_path_st, port_number);

	INS_FLD(4, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, mtu);
	INS_FLD(0xb, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
		msg_max);
}
458
459 static void prep_init2rtr_qpee_buf(void *buf)
460 {
461         struct qp_ee_state_tarnisition_st *prm;
462
463         prm = (struct qp_ee_state_tarnisition_st *)buf;
464
465         INS_FLD(4, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st, mtu);
466         INS_FLD(0xb, &prm->ctx, tavorprm_queue_pair_ee_context_entry_st,
467                 msg_max);
468 }
469
470 static void init_av_array()
471 {
472         int i;
473
474         dev_ib_data.udav.av_array = dev_buffers_p->av_array;
475         dev_ib_data.udav.udav_next_free = FL_EOL;
476         for (i = 0; i < NUM_AVS; ++i) {
477                 dev_ib_data.udav.av_array[i].ud_av.next_free =
478                     dev_ib_data.udav.udav_next_free;
479                 dev_ib_data.udav.udav_next_free = i;
480         }
481         tprintf("dev_ib_data.udav.udav_next_free=%d", i);
482 }
483
/* Bring the HCA from power-on to a usable state: reset it, restore
 * the PCI configuration the reset wiped, enable the device (SYS_EN),
 * query firmware and device limits, lay out the context tables in
 * the attached DDR (BAR4/BAR5) and run INIT_HCA, register the single
 * memory region covering all driver buffers, create and map the one
 * event queue, run INIT_IB on the requested port, and build the
 * address-vector free list.  On failure the completed steps are
 * unwound through the goto ladder at the bottom.
 * Returns 0 on success, -1 on failure; *eq_p receives the EQ handle. */
static int setup_hca(__u8 port, void **eq_p)
{
	int rc;
	__u32 key, in_key;
	__u32 *inprm;
	struct eqe_t *eq_buf;
	__u32 event_mask;
	void *cfg;
	int ret = 0;
	__u8 eqn;
	struct dev_lim_st dev_lim;
	struct init_hca_st init_hca;
	__u32 offset, base_h, base_l;
	const __u32 delta = 0x400000;	/* 4MB carved out per context table */
	struct query_fw_st qfw;

	tprintf("called");

	init_dev_data();

	rc = reset_hca();
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("reset_hca() success");
	}

	mdelay(1000);		/* wait for 1 sec */

	/* the reset wiped PCI config space; put it back */
	rc = restore_config();
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("restore_config() success");
	}

	dev_ib_data.pd = GLOBAL_PD;
	dev_ib_data.port = port;

	/* execute system enable command */
	rc = cmd_sys_en();
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("cmd_sys_en() success");
	}

	rc= cmd_query_fw(&qfw);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("cmd_query_fw() success");

		if (print_info) {
			printf("FW ver = %d.%d.%d\n",
				qfw.fw_rev_major,
				qfw.fw_rev_minor,
				qfw.fw_rev_subminor);
		}
		tprintf("fw_rev_major=%d", qfw.fw_rev_major);
		tprintf("fw_rev_minor=%d", qfw.fw_rev_minor);
		tprintf("fw_rev_subminor=%d", qfw.fw_rev_subminor);
		tprintf("error_buf_start_h=0x%x", qfw.error_buf_start_h);
		tprintf("error_buf_start_l=0x%x", qfw.error_buf_start_l);
		tprintf("error_buf_size=%d", qfw.error_buf_size);
	}

	/* the firmware error buffer can only be mapped if it lives in
	   the low 32-bit address range */
	if (qfw.error_buf_start_h) {
		eprintf("too high physical address");
		ret = -1;
		goto exit;
	}

	/* error_buf_size is in dwords, hence the *4 */
	dev_ib_data.error_buf_addr= ioremap(qfw.error_buf_start_l,
										qfw.error_buf_size*4);
	dev_ib_data.error_buf_size= qfw.error_buf_size;
	if (!dev_ib_data.error_buf_addr) {
		eprintf("");
		ret = -1;
		goto exit;
	}


	rc = cmd_query_dev_lim(&dev_lim);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("cmd_query_dev_lim() success");
		tprintf("log2_rsvd_qps=%x", dev_lim.log2_rsvd_qps);
		tprintf("qpc_entry_sz=%x", dev_lim.qpc_entry_sz);
		tprintf("log2_rsvd_srqs=%x", dev_lim.log2_rsvd_srqs);
		tprintf("srq_entry_sz=%x", dev_lim.srq_entry_sz);
		tprintf("log2_rsvd_ees=%x", dev_lim.log2_rsvd_ees);
		tprintf("eec_entry_sz=%x", dev_lim.eec_entry_sz);
		tprintf("log2_rsvd_cqs=%x", dev_lim.log2_rsvd_cqs);
		tprintf("cqc_entry_sz=%x", dev_lim.cqc_entry_sz);
		tprintf("log2_rsvd_mtts=%x", dev_lim.log2_rsvd_mtts);
		tprintf("mtt_entry_sz=%x", dev_lim.mtt_entry_sz);
		tprintf("log2_rsvd_mrws=%x", dev_lim.log2_rsvd_mrws);
		tprintf("mpt_entry_sz=%x", dev_lim.mpt_entry_sz);
		tprintf("eqc_entry_sz=%x", dev_lim.eqc_entry_sz);
	}

	/* set the qp and cq numbers according
	   to the results of query_dev_lim: user QPs/CQs start just
	   above the firmware-reserved range.  (The stray unary '+'
	   before QPN_BASE below is harmless.) */
	dev_ib_data.mads_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
	    +QPN_BASE + MADS_QPN_SN;
	dev_ib_data.ipoib_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
	    +QPN_BASE + IPOIB_QPN_SN;

	dev_ib_data.mads_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
	    MADS_SND_CQN_SN;
	dev_ib_data.mads_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
	    MADS_RCV_CQN_SN;

	dev_ib_data.ipoib_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
	    IPOIB_SND_CQN_SN;
	dev_ib_data.ipoib_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
	    IPOIB_RCV_CQN_SN;

	/* disable SRQ */
	cfg = (void *)dev_buffers_p->inprm_buf;
	memset(cfg, 0, MT_STRUCT_SIZE(tavorprm_mod_stat_cfg_st));
	INS_FLD(1, cfg, tavorprm_mod_stat_cfg_st, srq_m);	//cfg->srq_m = 1;
	rc = cmd_mod_stat_cfg(cfg);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	} else {
		tprintf("cmd_mod_stat_cfg() success");
	}

	/* prepare the init_hca params to pass
	   to prep_init_hca_buf: carve consecutive 4MB (delta) slices
	   of the attached DDR (BAR4/BAR5) for each context table */
	memset(&init_hca, 0, sizeof init_hca);
	offset = 0;
	base_h = tavor_pci_dev.dev.bar[5] & 0xfffffff0;
	base_l = tavor_pci_dev.dev.bar[4] & 0xfffffff0;

	tprintf("base_h=0x%lx, base_l=0x%lx", base_h, base_l);

	/* QP contexts: reserved QPs plus one extra bit for our own */
	init_hca.qpc_base_addr_h = base_h;
	init_hca.qpc_base_addr_l = base_l + offset;
	init_hca.log_num_of_qp = dev_lim.log2_rsvd_qps + 1;
	offset += delta;

	init_hca.eec_base_addr_h = base_h;
	init_hca.eec_base_addr_l = base_l + offset;
	init_hca.log_num_of_ee = dev_lim.log2_rsvd_ees;
	offset += delta;

	init_hca.srqc_base_addr_h = base_h;
	init_hca.srqc_base_addr_l = base_l + offset;
	init_hca.log_num_of_srq = dev_lim.log2_rsvd_srqs;
	offset += delta;

	init_hca.cqc_base_addr_h = base_h;
	init_hca.cqc_base_addr_l = base_l + offset;
	init_hca.log_num_of_cq = dev_lim.log2_rsvd_cqs + 1;
	offset += delta;

	init_hca.eqpc_base_addr_h = base_h;
	init_hca.eqpc_base_addr_l = base_l + offset;
	offset += delta;

	init_hca.eeec_base_addr_h = base_h;
	init_hca.eeec_base_addr_l = base_l + offset;
	offset += delta;

	init_hca.eqc_base_addr_h = base_h;
	init_hca.eqc_base_addr_l = base_l + offset;
	init_hca.log_num_of_eq = LOG2_EQS;
	offset += delta;

	init_hca.rdb_base_addr_h = base_h;
	init_hca.rdb_base_addr_l = base_l + offset;
	offset += delta;

	init_hca.mc_base_addr_h = base_h;
	init_hca.mc_base_addr_l = base_l + offset;
	init_hca.log_mc_table_entry_sz = LOG2_MC_ENTRY;
	init_hca.mc_table_hash_sz = 0;
	init_hca.log_mc_table_sz = LOG2_MC_GROUPS;
	offset += delta;

	init_hca.mpt_base_addr_h = base_h;
	init_hca.mpt_base_addr_l = base_l + offset;
	init_hca.log_mpt_sz = dev_lim.log2_rsvd_mrws + 1;
	offset += delta;

	init_hca.mtt_base_addr_h = base_h;
	init_hca.mtt_base_addr_l = base_l + offset;

	/* this buffer is used for all the commands */
	inprm = (void *)dev_buffers_p->inprm_buf;
	/* excute init_hca command */
	prep_init_hca_buf(&init_hca, inprm);

	rc = cmd_init_hca(inprm, MT_STRUCT_SIZE(tavorprm_init_hca_st));
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_sys_en;
	} else
		tprintf("cmd_init_hca() success");

	/* register a single memory region which covers
	   4 GB of the address space which will be used
	   throughout the driver */
	memset(inprm, 0, SW2HW_MPT_IBUF_SZ);
	in_key = MKEY_PREFIX + (1 << dev_lim.log2_rsvd_mrws);
	prep_sw2hw_mpt_buf(inprm, in_key);
	rc = cmd_sw2hw_mpt(&key, in_key, inprm, SW2HW_MPT_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_init_hca;
	} else {
		tprintf("cmd_sw2hw_mpt() success, key=0x%lx", key);
	}
	dev_ib_data.mkey = key;

	eqn = EQN;
	/* allocate a single EQ which will receive 
	   all the events */
	eq_buf = dev_buffers_p->eq_buf;
	init_eq_buf(eq_buf);	/* put in HW ownership */
	prep_sw2hw_eq_buf(inprm, eq_buf);
	/* NOTE(review): unlike the other commands, cmd_sw2hw_eq() is
	   passed only the mailbox size -- presumably it picks up the
	   prepared inprm buffer internally; confirm in the command
	   interface before changing */
	rc = cmd_sw2hw_eq(SW2HW_EQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_sw2hw_mpt;
	} else
		tprintf("cmd_sw2hw_eq() success");

	/* route completion and error events to our single EQ */
	event_mask = (1 << XDEV_EV_TYPE_CQ_COMP) |
	    (1 << XDEV_EV_TYPE_CQ_ERR) |
	    (1 << XDEV_EV_TYPE_LOCAL_WQ_CATAS_ERR) |
	    (1 << XDEV_EV_TYPE_PORT_ERR) |
	    (1 << XDEV_EV_TYPE_LOCAL_WQ_INVALID_REQ_ERR) |
	    (1 << XDEV_EV_TYPE_LOCAL_WQ_ACCESS_VIOL_ERR) |
	    (1 << TAVOR_IF_EV_TYPE_OVERRUN);
	rc = cmd_map_eq(eqn, event_mask, 1);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_sw2hw_eq;
	} else
		tprintf("cmd_map_eq() success");

	dev_ib_data.eq.eqn = eqn;
	dev_ib_data.eq.eq_buf = eq_buf;
	dev_ib_data.eq.cons_idx = 0;
	dev_ib_data.eq.eq_size = 1 << LOG2_EQ_SZ;
	*eq_p = &dev_ib_data.eq;

	memset(inprm, 0, INIT_IB_IBUF_SZ);
	prep_init_ib_buf(inprm);
	rc = cmd_init_ib(port, inprm, INIT_IB_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_sw2hw_eq;
	} else
		tprintf("cmd_init_ib() success");

	init_av_array();
	tprintf("init_av_array() done");

	goto exit;

	/* error unwind ladder: each label undoes one successful step
	   and falls through to undo the earlier ones */
      undo_sw2hw_eq:
	rc = cmd_hw2sw_eq(EQN);
	if (rc) {
		eprintf("");
	} else
		tprintf("cmd_hw2sw_eq() success");

      undo_sw2hw_mpt:
	rc = cmd_hw2sw_mpt(key);
	if (rc)
		eprintf("");
	else
		tprintf("cmd_hw2sw_mpt() success key=0x%lx", key);

      undo_init_hca:
	rc = cmd_close_hca(0);
	if (rc) {
		eprintf("");
		goto undo_sys_en;
	} else
		tprintf("cmd_close_hca() success");

      undo_sys_en:
	rc = cmd_sys_dis();
	if (rc) {
		eprintf("");
		/* NOTE(review): jumping back to this same label retries
		   forever if SYS_DIS keeps failing -- potential hang on
		   a wedged device */
		goto undo_sys_en;
	} else
		tprintf("cmd_sys_dis() success");
	goto exit;

      exit:
	return ret;
}
801
802 static void *get_inprm_buf(void)
803 {
804         return dev_buffers_p->inprm_buf;
805 }
806
807 static void *get_outprm_buf(void)
808 {
809         return dev_buffers_p->outprm_buf;
810 }
811
/* Return the virtual address of the index'th scatter/gather buffer
 * of a UD send WQE.
 * NOTE(review): unlike get_rcv_wqe_buf(), the address is read without
 * be32_to_cpu() -- send WQE pointers are apparently kept in CPU byte
 * order until the WQE is posted; confirm against the send post path. */
static void *get_send_wqe_buf(void *wqe, __u8 index)
{
	struct ud_send_wqe_st *snd_wqe = wqe;

	return bus_to_virt(snd_wqe->mpointer[index].local_addr_l);
}
818
/* Return the virtual address of the index'th scatter buffer of a
 * receive WQE.  Receive WQEs are stored in device (big-endian) byte
 * order, hence the be32_to_cpu() before translating the bus address. */
static void *get_rcv_wqe_buf(void *wqe, __u8 index)
{
	struct recv_wqe_st *rcv_wqe = wqe;

	return bus_to_virt(be32_to_cpu(rcv_wqe->mpointer[index].local_addr_l));
}
825
/*
 * Fill in a Tavor UD address vector.
 *
 * av   - address vector to (re)initialize
 * dlid - destination LID
 * g    - GRH flag; when set, the route is global and a GID applies
 * sl   - service level
 * rate - static rate selector: >= 3 selects 4x, otherwise 1x
 * gid  - destination GID (may be NULL even when g is set; zeros are used)
 * qpn  - destination QP number, cached next to the AV for send WQEs
 */
static void modify_av_params(struct ud_av_st *av,
                             __u16 dlid,
                             __u8 g,
                             __u8 sl, __u8 rate, union ib_gid_u *gid, __u32 qpn)
{
        memset(&av->av, 0, sizeof av->av);
        INS_FLD(dev_ib_data.port, &av->av, tavorprm_ud_address_vector_st,
                port_number);
        INS_FLD(dev_ib_data.pd, &av->av, tavorprm_ud_address_vector_st, pd);
        INS_FLD(dlid, &av->av, tavorprm_ud_address_vector_st, rlid);
        INS_FLD(g, &av->av, tavorprm_ud_address_vector_st, g);
        INS_FLD(sl, &av->av, tavorprm_ud_address_vector_st, sl);
        INS_FLD(3, &av->av, tavorprm_ud_address_vector_st, msg);

        if (rate >= 3)
                INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, max_stat_rate);      /* 4x */
        else
                INS_FLD(1, &av->av, tavorprm_ud_address_vector_st, max_stat_rate);      /* 1x */

        /* Swap the AV to big endian BEFORE inserting the GID words: the
           raw GID bytes are already in network (big-endian) order, so the
           rgid fields are written after the swap and left untouched. */
        cpu_to_be_buf(&av->av, sizeof(av->av));
        if (g) {
                if (gid) {
                        INS_FLD(*((__u32 *) (&gid->raw[0])), &av->av,
                                tavorprm_ud_address_vector_st, rgid_127_96);
                        INS_FLD(*((__u32 *) (&gid->raw[4])), &av->av,
                                tavorprm_ud_address_vector_st, rgid_95_64);
                        INS_FLD(*((__u32 *) (&gid->raw[8])), &av->av,
                                tavorprm_ud_address_vector_st, rgid_63_32);
                        INS_FLD(*((__u32 *) (&gid->raw[12])), &av->av,
                                tavorprm_ud_address_vector_st, rgid_31_0);
                } else {
                        INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
                                rgid_127_96);
                        INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
                                rgid_95_64);
                        INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
                                rgid_63_32);
                        INS_FLD(0, &av->av, tavorprm_ud_address_vector_st,
                                rgid_31_0);
                }
        } else {
                /* NOTE(review): for the non-GRH case the low GID dword is
                   set to 2 — presumably a PRM requirement for LID-routed
                   AVs; confirm against the Tavor PRM. */
                INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_127_96);
                INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_95_64);
                INS_FLD(0, &av->av, tavorprm_ud_address_vector_st, rgid_63_32);
                INS_FLD(2, &av->av, tavorprm_ud_address_vector_st, rgid_31_0);
        }
        av->dest_qp = qpn;
}
874
875 static void init_cq_buf(union cqe_st *cq_buf, __u8 num_cqes)
876 {
877         memset(cq_buf, 0xff, num_cqes * sizeof cq_buf[0]);
878 }
879
/*
 * Post a receive WQE to a UD QP via the receive doorbell, and chain it
 * to the previously posted WQE (if any) by patching that WQE's 'next'
 * segment in place.  Returns the doorbell command status (0 = success).
 */
static int post_rcv_buf(struct udqp_st *qp, struct recv_wqe_st *rcv_wqe)
{
        struct recv_doorbell_st dbell;
        int rc;
        /* scratch copy of the previous WQE's first two dwords (its
           'next' segment), edited in CPU order then written back */
        __u32 tmp[2];
        struct recv_wqe_st *tmp_wqe = (struct recv_wqe_st *)tmp;
        __u32 *ptr_dst;

        memset(&dbell, 0, sizeof dbell);
        /* nds is in units of 16 bytes; nda in units of 64 bytes */
        INS_FLD(sizeof(*rcv_wqe) >> 4, &dbell, tavorprm_receive_doorbell_st,
                nds);
        INS_FLD(virt_to_bus(rcv_wqe) >> 6, &dbell, tavorprm_receive_doorbell_st,
                nda);
        INS_FLD(qp->qpn, &dbell, tavorprm_receive_doorbell_st, qpn);
        INS_FLD(1, &dbell, tavorprm_receive_doorbell_st, credits);

        if (qp->last_posted_rcv_wqe) {
                /* link the previous WQE to the new one: copy out its
                   'next' segment, edit, and copy back atomically enough
                   for the HW (two aligned 32-bit stores) */
                memcpy(tmp, qp->last_posted_rcv_wqe, sizeof(tmp));
                be_to_cpu_buf(tmp, sizeof(tmp));
                INS_FLD(1, tmp_wqe->next, wqe_segment_next_st, dbd);
                INS_FLD(sizeof(*rcv_wqe) >> 4, tmp_wqe->next,
                        wqe_segment_next_st, nds);
                INS_FLD(virt_to_bus(rcv_wqe) >> 6, tmp_wqe->next,
                        wqe_segment_next_st, nda_31_6);
                /* this is not really opcode but since the struct
                   is used for both send and receive, in receive this bit must be 1
                   which coincides with nopcode */
                INS_FLD(1, tmp_wqe->next, wqe_segment_next_st, nopcode);

                cpu_to_be_buf(tmp, sizeof(tmp));

                ptr_dst = (__u32 *) (qp->last_posted_rcv_wqe);
                ptr_dst[0] = tmp[0];
                ptr_dst[1] = tmp[1];
        }
        rc = cmd_post_doorbell(&dbell, POST_RCV_OFFSET);
        if (!rc) {
                qp->last_posted_rcv_wqe = rcv_wqe;
        }

        return rc;
}
922
/*
 * Post a UD send WQE via the send doorbell.
 *
 * qph        - QP handle (struct udqp_st *)
 * wqeh       - WQE handle (struct ud_send_wqe_st *), already big-endian
 * num_gather - number of gather entries in use
 *
 * As with post_rcv_buf(), the previously posted send WQE (if any) has
 * its 'next' control segment patched to point at the new WQE.
 * Returns the doorbell command status (0 = success).
 */
static int post_send_req(void *qph, void *wqeh, __u8 num_gather)
{
        struct send_doorbell_st dbell;
        int rc;
        struct udqp_st *qp = qph;
        struct ud_send_wqe_st *snd_wqe = wqeh;
        struct next_control_seg_st tmp;
        __u32 *psrc, *pdst;
        __u32 nds;

        tprintf("snd_wqe=0x%lx, virt_to_bus(snd_wqe)=0x%lx", snd_wqe,
                virt_to_bus(snd_wqe));

        memset(&dbell, 0, sizeof dbell);
        INS_FLD(XDEV_NOPCODE_SEND, &dbell, tavorprm_send_doorbell_st, nopcode);
        INS_FLD(1, &dbell, tavorprm_send_doorbell_st, f);
        /* nda in 64-byte units */
        INS_FLD(virt_to_bus(snd_wqe) >> 6, &dbell, tavorprm_send_doorbell_st,
                nda);
        /* descriptor size in 16-byte units: control + UD segment + the
           gather entries actually used */
        nds =
            (sizeof(snd_wqe->next) + sizeof(snd_wqe->udseg) +
             sizeof(snd_wqe->mpointer[0]) * num_gather) >> 4;
        INS_FLD(nds, &dbell, tavorprm_send_doorbell_st, nds);
        INS_FLD(qp->qpn, &dbell, tavorprm_send_doorbell_st, qpn);

        tprintf("0= %lx", ((__u32 *) ((void *)(&dbell)))[0]);
        tprintf("1= %lx", ((__u32 *) ((void *)(&dbell)))[1]);

        if (qp->last_posted_snd_wqe) {
                /* chain the previous WQE to this one */
                memcpy(&tmp, &qp->last_posted_snd_wqe->next, sizeof tmp);
                be_to_cpu_buf(&tmp, sizeof tmp);
                INS_FLD(1, &tmp, wqe_segment_next_st, dbd);
                INS_FLD(virt_to_bus(snd_wqe) >> 6, &tmp, wqe_segment_next_st,
                        nda_31_6);
                INS_FLD(nds, &tmp, wqe_segment_next_st, nds);

                psrc = (__u32 *) (&tmp);
                pdst = (__u32 *) (&qp->last_posted_snd_wqe->next);
                pdst[0] = htonl(psrc[0]);
                pdst[1] = htonl(psrc[1]);
        }

        rc = cmd_post_doorbell(&dbell, POST_SND_OFFSET);
        if (!rc) {
                qp->last_posted_snd_wqe = snd_wqe;
        }

        return rc;
}
971
/*
 * Set up the MADs (management datagram) UD QP: attach the statically
 * allocated WQE rings, buffers and CQ buffers, then create the QP via
 * create_udqp().  On success the QP and both CQ handles are returned
 * through the out parameters.  Returns 0 on success, -1 on failure.
 */
static int create_mads_qp(void **qp_pp, void **snd_cq_pp, void **rcv_cq_pp)
{
        __u8 i;
        int rc;
        struct udqp_st *qp;

        qp = &dev_ib_data.mads_qp;

        /* set the pointer to the receive WQEs buffer */
        qp->rcv_wq = dev_buffers_p->mads_qp_rcv_queue;

        qp->send_buf_sz = MAD_BUF_SZ;
        qp->rcv_buf_sz = MAD_BUF_SZ;

        qp->recv_wqe_alloc_idx = 0;
        qp->max_recv_wqes = NUM_MADS_RCV_WQES;
        qp->recv_wqe_cur_free = NUM_MADS_RCV_WQES;

        /* iterate through the list: clear each WQE, link it back to its
           QP and attach its pre-allocated data buffer */
        for (i = 0; i < NUM_MADS_RCV_WQES; ++i) {
                /* clear the WQE */
                memset(&qp->rcv_wq[i], 0, sizeof(qp->rcv_wq[i]));

                qp->rcv_wq[i].wqe_cont.qp = qp;
                qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
        }

        /* set the pointer to the send WQEs buffer */
        qp->snd_wq = dev_buffers_p->mads_qp_snd_queue;

        qp->snd_wqe_alloc_idx = 0;
        qp->max_snd_wqes = NUM_MADS_SND_WQES;
        qp->snd_wqe_cur_free = NUM_MADS_SND_WQES;

        /* same initialization for the send ring */
        for (i = 0; i < NUM_MADS_SND_WQES; ++i) {
                /* clear the WQE */
                memset(&qp->snd_wq[i], 0, sizeof(qp->snd_wq[i]));

                /* link the WQE to the free list */
                qp->snd_wq[i].wqe_cont.qp = qp;
                qp->snd_bufs[i] = ib_buffers.send_mad_buf[i];
        }

        /* qp number and cq numbers are already set up */
        qp->snd_cq.cq_buf = dev_buffers_p->mads_snd_cq_buf;
        qp->rcv_cq.cq_buf = dev_buffers_p->mads_rcv_cq_buf;
        qp->snd_cq.num_cqes = NUM_MADS_SND_CQES;
        qp->rcv_cq.num_cqes = NUM_MADS_RCV_CQES;
        qp->qkey = GLOBAL_QKEY;
        rc = create_udqp(qp);
        if (!rc) {
                *qp_pp = qp;
                *snd_cq_pp = &qp->snd_cq;
                *rcv_cq_pp = &qp->rcv_cq;
        }

        return rc;
}
1031
/*
 * Set up the IPoIB UD QP, mirroring create_mads_qp() but with the
 * IPoIB rings/buffers and a caller-supplied qkey (the broadcast group
 * qkey).  Returns 0 on success, -1 on failure.
 */
static int create_ipoib_qp(void **qp_pp,
                           void **snd_cq_pp, void **rcv_cq_pp, __u32 qkey)
{
        __u8 i;
        int rc;
        struct udqp_st *qp;
        qp = &dev_ib_data.ipoib_qp;

        /* set the pointer to the receive WQEs buffer */
        qp->rcv_wq = dev_buffers_p->ipoib_qp_rcv_queue;

        qp->rcv_buf_sz = IPOIB_RCV_BUF_SZ;

        qp->recv_wqe_alloc_idx = 0;
        qp->max_recv_wqes = NUM_IPOIB_RCV_WQES;
        qp->recv_wqe_cur_free = NUM_IPOIB_RCV_WQES;

        /* iterate through the receive ring: clear each WQE, link it to
           its QP and attach its pre-allocated data buffer */
        for (i = 0; i < NUM_IPOIB_RCV_WQES; ++i) {
                /* clear the WQE */
                memset(&qp->rcv_wq[i], 0, sizeof(qp->rcv_wq[i]));

                /* update data */
                qp->rcv_wq[i].wqe_cont.qp = qp;
                qp->rcv_bufs[i] = ib_buffers.ipoib_rcv_buf[i];
                tprintf("rcv_buf=%lx", qp->rcv_bufs[i]);
        }

        /* init send queue WQEs list */
        /* set the list empty */
        qp->snd_wqe_alloc_idx = 0;
        qp->max_snd_wqes = NUM_IPOIB_SND_WQES;
        qp->snd_wqe_cur_free = NUM_IPOIB_SND_WQES;

        /* set the pointer to the send WQEs buffer */
        qp->snd_wq = dev_buffers_p->ipoib_qp_snd_queue;

        /* same initialization for the send ring */
        for (i = 0; i < NUM_IPOIB_SND_WQES; ++i) {
                /* clear the WQE */
                memset(&qp->snd_wq[i], 0, sizeof(qp->snd_wq[i]));

                /* update data */
                qp->snd_wq[i].wqe_cont.qp = qp;
                qp->snd_bufs[i] = ib_buffers.send_ipoib_buf[i];
                /* NOTE(review): send_buf_sz is set to the constant 4 on
                   every iteration (contrast MAD_BUF_SZ in create_mads_qp)
                   — looks intentional but undocumented; confirm. */
                qp->send_buf_sz = 4;
        }

        /* qp number and cq numbers are already set up */

        qp->snd_cq.cq_buf = dev_buffers_p->ipoib_snd_cq_buf;
        qp->rcv_cq.cq_buf = dev_buffers_p->ipoib_rcv_cq_buf;
        qp->snd_cq.num_cqes = NUM_IPOIB_SND_CQES;
        qp->rcv_cq.num_cqes = NUM_IPOIB_RCV_CQES;
        qp->qkey = qkey;
        rc = create_udqp(qp);
        if (!rc) {
                *qp_pp = qp;
                *snd_cq_pp = &qp->snd_cq;
                *rcv_cq_pp = &qp->rcv_cq;
        }

        return rc;
}
1096
/*
 * Create a UD QP in hardware:
 *   1. hand both CQ buffers to the HCA (SW2HW_CQ),
 *   2. transition the QP RST->INIT->RTR->RTS,
 *   3. pre-post every available receive buffer.
 *
 * Uses goto-based unwind: on failure the CQs already given to hardware
 * are returned to software ownership.  Returns 0 on success, -1 on any
 * failure.
 */
static int create_udqp(struct udqp_st *qp)
{
        int rc, ret = 0;
        void *inprm;
        struct recv_wqe_st *rcv_wqe;

        inprm = dev_buffers_p->inprm_buf;

        /* create send CQ */
        init_cq_buf(qp->snd_cq.cq_buf, qp->snd_cq.num_cqes);
        qp->snd_cq.cons_idx = 0;
        memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
        prep_sw2hw_cq_buf(inprm, dev_ib_data.eq.eqn, qp->snd_cq.cqn,
                          qp->snd_cq.cq_buf);
        rc = cmd_sw2hw_cq(qp->snd_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
        if (rc) {
                ret = -1;
                eprintf("");
                goto exit;
        }

        /* create receive CQ */
        init_cq_buf(qp->rcv_cq.cq_buf, qp->rcv_cq.num_cqes);
        qp->rcv_cq.cons_idx = 0;
        memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
        prep_sw2hw_cq_buf(inprm, dev_ib_data.eq.eqn, qp->rcv_cq.cqn,
                          qp->rcv_cq.cq_buf);
        rc = cmd_sw2hw_cq(qp->rcv_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
        if (rc) {
                ret = -1;
                eprintf("");
                goto undo_snd_cq;
        }

        /* QP transition: RESET -> INIT */
        memset(inprm, 0, QPCTX_IBUF_SZ);
        prep_rst2init_qpee_buf(inprm, qp->snd_cq.cqn, qp->rcv_cq.cqn, qp->qkey);
        rc = cmd_rst2init_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
        if (rc) {
                ret = -1;
                eprintf("");
                goto undo_rcv_cq;
        }

        qp->last_posted_rcv_wqe = NULL;
        qp->last_posted_snd_wqe = NULL;

        /* post all the buffers to the receive queue */
        while (1) {
                /* allocate wqe; NULL means the ring is exhausted */
                rcv_wqe = alloc_rcv_wqe(qp);
                if (!rcv_wqe)
                        break;

                /* post the buffer */
                rc = post_rcv_buf(qp, rcv_wqe);
                if (rc) {
                        ret = -1;
                        eprintf("");
                        goto undo_rcv_cq;
                }
        }

        /* QP transition: INIT -> RTR (ready to receive) */
        memset(inprm, 0, QPCTX_IBUF_SZ);
        prep_init2rtr_qpee_buf(inprm);
        rc = cmd_init2rtr_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
        if (rc) {
                ret = -1;
                eprintf("");
                goto undo_rcv_cq;
        }

        /* QP transition: RTR -> RTS (ready to send) */
        memset(inprm, 0, QPCTX_IBUF_SZ);
        rc = cmd_rtr2rts_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
        if (rc) {
                ret = -1;
                eprintf("");
                goto undo_rcv_cq;
        }

        goto exit;

      undo_rcv_cq:
        rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
        if (rc)
                eprintf("");

      undo_snd_cq:
        rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
        if (rc)
                eprintf("");

      exit:
        return ret;
}
1191
1192 static int destroy_udqp(struct udqp_st *qp)
1193 {
1194         int rc;
1195
1196         rc = cmd_2err_qpee(qp->qpn);
1197         if (rc) {
1198                 eprintf("");
1199                 return rc;
1200         }
1201         tprintf("cmd_2err_qpee(0x%lx) success", qp->qpn);
1202
1203         rc = cmd_2rst_qpee(qp->qpn);
1204         if (rc) {
1205                 eprintf("");
1206                 return rc;
1207         }
1208         tprintf("cmd_2rst_qpee(0x%lx) success", qp->qpn);
1209
1210         rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
1211         if (rc) {
1212                 eprintf("");
1213                 return rc;
1214         }
1215         tprintf("cmd_hw2sw_cq(0x%lx) success", qp->snd_cq.cqn);
1216
1217         rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
1218         if (rc) {
1219                 eprintf("");
1220                 return rc;
1221         }
1222         tprintf("cmd_hw2sw_cq(0x%lx) success", qp->rcv_cq.cqn);
1223
1224         return rc;
1225 }
1226
/*
 * Prepare a UD send WQE for posting.
 *
 * qph    - QP handle (struct udqp_st *)
 * avh    - address vector handle (struct ud_av_st *) for the destination
 * wqeh   - WQE handle (struct ud_send_wqe_st *), as returned by
 *          alloc_send_wqe() (gather entry 0 already points at the slot's
 *          data buffer)
 * buf    - optional payload to copy into the buffer at 'offset'
 *          (NULL means the buffer already holds 'len' bytes)
 * offset - byte offset of the copied payload within the buffer
 * len    - payload length; when buf is given, the total byte count
 *          becomes offset + len
 * e      - event-request bit for the control segment
 *
 * The WQE is byte-swapped to big endian in place as the final step.
 */
static void prep_send_wqe_buf(void *qph,
                              void *avh,
                              void *wqeh,
                              const void *buf,
                              unsigned int offset, __u16 len, __u8 e)
{
        struct udqp_st *qp = qph;
        struct ud_av_st *av = avh;
        struct ud_send_wqe_st *wqe = wqeh;

        INS_FLD(e, wqe->next.control, wqe_segment_ctrl_send_st, e);
        INS_FLD(1, wqe->next.control, wqe_segment_ctrl_send_st, always1);

        wqe->udseg.av_add_h = 0;
        wqe->udseg.av_add_l = virt_to_bus(&av->av);
        wqe->udseg.dest_qp = av->dest_qp;
        wqe->udseg.lkey = dev_ib_data.mkey;
        wqe->udseg.qkey = qp->qkey;

        if (buf) {
                /* local_addr_l is still CPU-order here (swap happens below) */
                memcpy(bus_to_virt(wqe->mpointer[0].local_addr_l) + offset, buf,
                       len);
                len += offset;
        }
        wqe->mpointer[0].byte_count = len;
        wqe->mpointer[0].lkey = dev_ib_data.mkey;

        cpu_to_be_buf(wqe, sizeof *wqe);
}
1256
1257 static void *alloc_ud_av(void)
1258 {
1259         u8 next_free;
1260
1261         if (dev_ib_data.udav.udav_next_free == FL_EOL) {
1262                 return NULL;
1263         }
1264
1265         next_free = dev_ib_data.udav.udav_next_free;
1266         dev_ib_data.udav.udav_next_free =
1267             dev_buffers_p->av_array[next_free].ud_av.next_free;
1268         tprintf("allocated udav %d", next_free);
1269         return &dev_buffers_p->av_array[next_free].ud_av;
1270 }
1271
1272 static void free_ud_av(void *avh)
1273 {
1274         union ud_av_u *avu;
1275         __u8 idx, old_idx;
1276         struct ud_av_st *av = avh;
1277
1278         avu = (union ud_av_u *)av;
1279
1280         idx = avu - dev_buffers_p->av_array;
1281         tprintf("freeing udav idx=%d", idx);
1282         old_idx = dev_ib_data.udav.udav_next_free;
1283         dev_ib_data.udav.udav_next_free = idx;
1284         avu->ud_av.next_free = old_idx;
1285 }
1286
/*
 * Tell the HCA to increment the consumer index of the given CQ by
 * ringing the CQ command doorbell.  Returns the doorbell post status.
 */
static int update_cq_cons_idx(struct cq_st *cq)
{
        struct cq_dbell_st dbell;
        int rc;

        memset(&dbell, 0, sizeof dbell);
        INS_FLD(cq->cqn, &dbell, tavorprm_cq_cmd_doorbell_st, cqn);
        INS_FLD(CQ_DBELL_CMD_INC_CONS_IDX, &dbell, tavorprm_cq_cmd_doorbell_st,
                cq_cmd);
        rc = cmd_post_doorbell(&dbell, CQ_DBELL_OFFSET);
        return rc;
}
1299
/*
 * Poll one entry from a completion queue.
 *
 * On return, *num_cqes is 1 and *cqe_p holds the CPU-order CQE if an
 * entry was available, otherwise *num_cqes is 0.  Returns 0 on success,
 * -1 on a bad CQN, or the error from updating the consumer index.
 *
 * Ownership protocol: the top bit of the CQE's 8th dword is clear when
 * software owns the entry; after consuming it, software sets the bit to
 * hand the slot back to hardware.
 */
static int poll_cq(void *cqh, union cqe_st *cqe_p, u8 * num_cqes)
{
        union cqe_st cqe;
        int rc;
        u32 *ptr;
        struct cq_st *cq = cqh;

        /* sanity check: this driver only ever creates CQNs 0x80-0x83 */
        if (cq->cqn < 0x80 || cq->cqn > 0x83) {
                eprintf("");
                return -1;
        }
        ptr = (u32 *) (&(cq->cq_buf[cq->cons_idx]));
        barrier();
        if ((ptr[7] & 0x80000000) == 0) {
                /* SW-owned: copy out, convert to CPU order, release slot */
                cqe = cq->cq_buf[cq->cons_idx];
                be_to_cpu_buf(&cqe, sizeof(cqe));
                *cqe_p = cqe;
                ptr[7] = 0x80000000;
                barrier();
                cq->cons_idx = (cq->cons_idx + 1) % cq->num_cqes;
                rc = update_cq_cons_idx(cq);
                if (rc) {
                        return rc;
                }
                *num_cqes = 1;
        } else
                *num_cqes = 0;

        return 0;
}
1330
/*
 * Translate a hardware CQE (already in CPU byte order) into the
 * driver's generic struct ib_cqe_st: error flag, send/receive flag,
 * WQE virtual address and byte count.
 */
static void dev2ib_cqe(struct ib_cqe_st *ib_cqe_p, union cqe_st *cqe_p)
{
        __u8 opcode;
        __u32 wqe_addr_ba;

        /* opcodes at or above CQE_ERROR_OPCODE indicate a completion error */
        opcode =
            EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st, opcode);
        if (opcode >= CQE_ERROR_OPCODE)
                ib_cqe_p->is_error = 1;
        else
                ib_cqe_p->is_error = 0;

        ib_cqe_p->is_send =
            EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st, s);
        /* wqe_adr is stored in 64-byte units */
        wqe_addr_ba =
            EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st,
                   wqe_adr) << 6;
        ib_cqe_p->wqe = bus_to_virt(wqe_addr_ba);

//      if (ib_cqe_p->is_send) {
//              be_to_cpu_buf(ib_cqe_p->wqe, sizeof(struct ud_send_wqe_st));
//      }
//      else {
//              be_to_cpu_buf(ib_cqe_p->wqe, sizeof(struct recv_wqe_st));
//      }
        ib_cqe_p->count =
            EX_FLD(cqe_p->good_cqe, tavorprm_completion_queue_entry_st,
                   byte_cnt);
}
1360
/*
 * Poll a CQ and translate the result to the generic ib_cqe_st form.
 * On an error completion, dump the error CQE fields and the offending
 * WQE for diagnosis.  Returns the poll_cq() status; *num_cqes is 0 when
 * no entry was available.
 */
static int ib_poll_cq(void *cqh, struct ib_cqe_st *ib_cqe_p, u8 * num_cqes)
{
        int rc;
        union cqe_st cqe;
        struct cq_st *cq = cqh;
        __u8 opcode;

        rc = poll_cq(cq, &cqe, num_cqes);
        if (rc || ((*num_cqes) == 0)) {
                return rc;
        }

        dev2ib_cqe(ib_cqe_p, &cqe);

        opcode =
            EX_FLD(cqe.good_cqe, tavorprm_completion_queue_entry_st, opcode);
        if (opcode >= CQE_ERROR_OPCODE) {
                /* error completion: reinterpret the CQE with the error
                   layout and dump everything useful */
                struct ud_send_wqe_st *wqe_p, wqe;
                __u32 *ptr;
                unsigned int i;

                /* wqe_addr is in 64-byte units */
                wqe_p =
                    bus_to_virt(EX_FLD
                                (cqe.error_cqe,
                                 tavorprm_completion_with_error_st,
                                 wqe_addr) << 6);
                eprintf("syndrome=0x%lx",
                        EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
                               syndrome));
                eprintf("wqe_addr=0x%lx", wqe_p);
                eprintf("wqe_size=0x%lx",
                        EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
                               wqe_size));
                eprintf("myqpn=0x%lx",
                        EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
                               myqpn));
                eprintf("db_cnt=0x%lx",
                        EX_FLD(cqe.error_cqe, tavorprm_completion_with_error_st,
                               db_cnt));
                memcpy(&wqe, wqe_p, sizeof wqe);
                be_to_cpu_buf(&wqe, sizeof wqe);

                eprintf("dumping wqe...");
                ptr = (__u32 *) (&wqe);
                /* NOTE(review): format "%lx : " prints only the value —
                   the trailing " : " suggests an offset was meant to
                   follow; output is one long unbroken line */
                for (i = 0; i < sizeof wqe; i += 4) {
                        printf("%lx : ", ptr[i >> 2]);
                }

        }

        return rc;
}
1413
/* always work on ipoib qp */
/*
 * Attach (add != 0) or detach (add == 0) the IPoIB QP to/from the
 * multicast group identified by mcast_gid: hash the GID to find the
 * MGM table entry, build the entry with the GID and our QPN, and write
 * it back with WRITE_MGM.  Returns the status of the failing firmware
 * command, or 0 on success.
 */
static int add_qp_to_mcast_group(union ib_gid_u mcast_gid, __u8 add)
{
        void *mg;
        __u8 *tmp;
        int rc;
        __u16 mgid_hash;
        void *mgmqp_p;

        /* MGID_HASH wants the GID byte-swapped in the inprm mailbox */
        tmp = dev_buffers_p->inprm_buf;
        memcpy(tmp, mcast_gid.raw, 16);
        be_to_cpu_buf(tmp, 16);
        rc = cmd_mgid_hash(tmp, &mgid_hash);
        if (!rc) {
                /* build the MGM entry in the (now free) inprm mailbox */
                mg = (void *)dev_buffers_p->inprm_buf;
                memset(mg, 0, MT_STRUCT_SIZE(tavorprm_mgm_entry_st));
                INS_FLD(mcast_gid.as_u32.dw[0], mg, tavorprm_mgm_entry_st, mgid_128_96);        // memcpy(&mg->mgid_128_96, &mcast_gid.raw[0], 4);
                INS_FLD(mcast_gid.as_u32.dw[1], mg, tavorprm_mgm_entry_st, mgid_95_64); // memcpy(&mg->mgid_95_64, &mcast_gid.raw[4], 4);
                INS_FLD(mcast_gid.as_u32.dw[2], mg, tavorprm_mgm_entry_st, mgid_63_32); //memcpy(&mg->mgid_63_32, &mcast_gid.raw[8], 4);
                INS_FLD(mcast_gid.as_u32.dw[3], mg, tavorprm_mgm_entry_st, mgid_31_0);  //memcpy(&mg->mgid_31_0, &mcast_gid.raw[12], 4);
                be_to_cpu_buf(mg + MT_BYTE_OFFSET(tavorprm_mgm_entry_st, mgid_128_96), 16);     //be_to_cpu_buf(&mg->mgid_128_96, 16);
                mgmqp_p = mg + MT_BYTE_OFFSET(tavorprm_mgm_entry_st, mgmqp_0);
                INS_FLD(dev_ib_data.ipoib_qp.qpn, mgmqp_p, tavorprm_mgmqp_st, qpn_i);   //mg->mgmqp[0].qpn = dev_ib_data.ipoib_qp.qpn;
                INS_FLD(add, mgmqp_p, tavorprm_mgmqp_st, qi);   //mg->mgmqp[0].valid = add ? 1 : 0;
                rc = cmd_write_mgm(mg, mgid_hash);
        }
        return rc;
}
1442
/*
 * Check for and acknowledge a pending device interrupt.
 *
 * Reads the ECR register; a nonzero value means an event is pending
 * (returned as 1).  The interrupt and ECR bits are then cleared
 * unconditionally.  Returns 1 if an event was pending, else 0.
 *
 * NOTE(review): register offsets (0x80704 ECR, 0xf00d8 clear-int,
 * 0x8070c ECR-clear) are Tavor CR-space magic numbers — confirm against
 * the MT23108 PRM before changing.
 */
static int clear_interrupt(void)
{
        __u32 ecr;
        int ret = 0;

        if (gw_read_cr(0x80704, &ecr)) {
                eprintf("");
        } else {
                if (ecr) {
                        ret = 1;
                }
        }
        gw_write_cr(0xf00d8, 0x80000000);       /* clear int */
        gw_write_cr(0x8070c, 0xffffffff);

        return ret;
}
1460
1461 static struct ud_send_wqe_st *alloc_send_wqe(udqp_t qph)
1462 {
1463         struct udqp_st *qp = qph;
1464         __u8 new_entry;
1465         struct ud_send_wqe_st *wqe;
1466
1467         if (qp->snd_wqe_cur_free == 0) {
1468                 return NULL;
1469         }
1470         new_entry = qp->snd_wqe_alloc_idx;
1471
1472         wqe = &qp->snd_wq[new_entry].wqe;
1473         qp->snd_wqe_cur_free--;
1474         qp->snd_wqe_alloc_idx = (qp->snd_wqe_alloc_idx + 1) % qp->max_snd_wqes;
1475
1476         memset(wqe, 0, sizeof *wqe);
1477
1478         wqe->mpointer[0].local_addr_l = virt_to_bus(qp->snd_bufs[new_entry]);
1479
1480         return wqe;
1481 }
1482
1483 /*
1484  *  alloc_rcv_wqe
1485  *
1486  *  Note: since we work directly on the work queue, wqes
1487  *        are left in big endian
1488  */
1489 static struct recv_wqe_st *alloc_rcv_wqe(struct udqp_st *qp)
1490 {
1491         __u8 new_entry;
1492         struct recv_wqe_st *wqe;
1493
1494         if (qp->recv_wqe_cur_free == 0) {
1495                 return NULL;
1496         }
1497
1498         new_entry = qp->recv_wqe_alloc_idx;
1499         wqe = &qp->rcv_wq[new_entry].wqe;
1500
1501         qp->recv_wqe_cur_free--;
1502         qp->recv_wqe_alloc_idx =
1503             (qp->recv_wqe_alloc_idx + 1) % qp->max_recv_wqes;
1504
1505         memset(wqe, 0, sizeof *wqe);
1506
1507         /* GRH is always required */
1508         wqe->mpointer[0].local_addr_h = 0;
1509         wqe->mpointer[0].local_addr_l = virt_to_bus(qp->rcv_bufs[new_entry]);
1510         wqe->mpointer[0].lkey = dev_ib_data.mkey;
1511         wqe->mpointer[0].byte_count = GRH_SIZE;
1512
1513         wqe->mpointer[1].local_addr_h = 0;
1514         wqe->mpointer[1].local_addr_l =
1515             virt_to_bus(qp->rcv_bufs[new_entry] + GRH_SIZE);
1516         wqe->mpointer[1].lkey = dev_ib_data.mkey;
1517         wqe->mpointer[1].byte_count = qp->rcv_buf_sz;
1518
1519         tprintf("rcv_buf=%lx\n", qp->rcv_bufs[new_entry]);
1520
1521         /* we do it only on the data segment since the control
1522            segment is always owned by HW */
1523         cpu_to_be_buf(wqe, sizeof *wqe);
1524
1525 //      tprintf("alloc wqe= 0x%x", wqe);
1526         return wqe;
1527 }
1528
1529 static int free_send_wqe(struct ud_send_wqe_st *wqe)
1530 {
1531         union ud_send_wqe_u *wqe_u;
1532         struct udqp_st *qp;
1533
1534         wqe_u = (union ud_send_wqe_u *)wqe;
1535         qp = wqe_u->wqe_cont.qp;
1536
1537         if (qp->snd_wqe_cur_free >= qp->max_snd_wqes) {
1538                 return -1;
1539         }
1540
1541         qp->snd_wqe_cur_free++;
1542
1543         return 0;
1544 }
1545
1546 static int free_rcv_wqe(struct recv_wqe_st *wqe)
1547 {
1548         union recv_wqe_u *wqe_u;
1549         struct udqp_st *qp;
1550
1551         wqe_u = (union recv_wqe_u *)wqe;
1552         qp = wqe_u->wqe_cont.qp;
1553
1554         if (qp->recv_wqe_cur_free >= qp->max_recv_wqes) {
1555                 return -1;
1556         }
1557
1558         qp->recv_wqe_cur_free++;
1559
1560         return 0;
1561 }
1562
1563 static int free_wqe(void *wqe)
1564 {
1565         int rc = 0;
1566         struct recv_wqe_st *rcv_wqe;
1567
1568 //      tprintf("free wqe= 0x%x", wqe);
1569         if ((wqe >= (void *)(dev_ib_data.ipoib_qp.rcv_wq)) &&
1570             (wqe <
1571              (void *)(&dev_ib_data.ipoib_qp.rcv_wq[NUM_IPOIB_RCV_WQES]))) {
1572                 /* ipoib receive wqe */
1573                 free_rcv_wqe(wqe);
1574                 rcv_wqe = alloc_rcv_wqe(&dev_ib_data.ipoib_qp);
1575                 if (rcv_wqe) {
1576                         rc = post_rcv_buf(&dev_ib_data.ipoib_qp, rcv_wqe);
1577                         if (rc) {
1578                                 eprintf("");
1579                         }
1580                 }
1581         } else if (wqe >= (void *)(dev_ib_data.ipoib_qp.snd_wq) &&
1582                    wqe <
1583                    (void *)(&dev_ib_data.ipoib_qp.snd_wq[NUM_IPOIB_SND_WQES])) {
1584                 /* ipoib send wqe */
1585                 free_send_wqe(wqe);
1586         } else if (wqe >= (void *)(dev_ib_data.mads_qp.rcv_wq) &&
1587                    wqe <
1588                    (void *)(&dev_ib_data.mads_qp.rcv_wq[NUM_MADS_RCV_WQES])) {
1589                 /* mads receive wqe */
1590                 free_rcv_wqe(wqe);
1591                 rcv_wqe = alloc_rcv_wqe(&dev_ib_data.mads_qp);
1592                 if (rcv_wqe) {
1593                         rc = post_rcv_buf(&dev_ib_data.mads_qp, rcv_wqe);
1594                         if (rc) {
1595                                 eprintf("");
1596                         }
1597                 }
1598         } else if (wqe >= (void *)(dev_ib_data.mads_qp.snd_wq) &&
1599                    wqe <
1600                    (void *)(&dev_ib_data.mads_qp.snd_wq[NUM_MADS_SND_WQES])) {
1601                 /* mads send wqe */
1602                 free_send_wqe(wqe);
1603         } else {
1604                 rc = -1;
1605                 eprintf("");
1606         }
1607
1608         return rc;
1609 }
1610
/*
 * Write the EQ's current consumer index to the HCA via the EQ command
 * doorbell.  Returns the doorbell post status.
 */
static int update_eq_cons_idx(struct eq_st *eq)
{
        struct eq_dbell_st dbell;
        int rc;

        memset(&dbell, 0, sizeof dbell);
        INS_FLD(dev_ib_data.eq.eqn, &dbell, tavorprm_eq_cmd_doorbell_st, eqn);
        INS_FLD(EQ_DBELL_CMD_SET_CONS_IDX, &dbell, tavorprm_eq_cmd_doorbell_st,
                eq_cmd);
        INS_FLD(eq->cons_idx, &dbell, tavorprm_eq_cmd_doorbell_st, eq_param);
        rc = cmd_post_doorbell(&dbell, EQ_DBELL_OFFSET);

        return rc;
}
1625
/*
 * Translate a hardware EQE (CPU byte order) into the driver's generic
 * struct ib_eqe_st: extract the event type, and the CQN from the
 * event_data area (interpreted as a completion event).
 */
static void dev2ib_eqe(struct ib_eqe_st *ib_eqe_p, void *eqe_p)
{
        void *tmp;

        ib_eqe_p->event_type =
            EX_FLD(eqe_p, tavorprm_event_queue_entry_st, event_type);

        /* event_data holds a completion-event layout for CQ events */
        tmp = eqe_p + MT_BYTE_OFFSET(tavorprm_event_queue_entry_st, event_data);
        ib_eqe_p->cqn = EX_FLD(tmp, tavorprm_completion_event_st, cqn);
}
1636
/*
 * Poll one entry from the event queue.  Ownership is signalled by the
 * top bit of the EQE's 8th dword: set = HW-owned, clear = SW-owned.
 * On success *num_eqes is 1 and *ib_eqe_p holds the translated event;
 * otherwise *num_eqes is 0.  Returns 0 on success, -1 on doorbell error.
 */
static int poll_eq(struct ib_eqe_st *ib_eqe_p, __u8 * num_eqes)
{
        struct eqe_t eqe;
        __u8 owner;
        int rc;
        __u32 *ptr;
        struct eq_st *eq = &dev_ib_data.eq;

        ptr = (__u32 *) (&(eq->eq_buf[eq->cons_idx]));
        tprintf("cons)idx=%d, addr(eqe)=%x, val=0x%x", eq->cons_idx, virt_to_bus(ptr), ptr[7]);
        owner = (ptr[7] & 0x80000000) ? OWNER_HW : OWNER_SW;
        if (owner == OWNER_SW) {
        tprintf("got eqe");
                eqe = eq->eq_buf[eq->cons_idx];
                be_to_cpu_buf(&eqe, sizeof(eqe));
                dev2ib_eqe(ib_eqe_p, &eqe);
                /* hand the slot back to hardware */
                ptr[7] |= 0x80000000;
                /* NOTE(review): this write-back stores the CPU-order copy
                   over the in-place entry AFTER setting the owner bit in
                   ptr[7] — the bit set above appears to be clobbered here;
                   confirm intended ordering against the PRM. */
                eq->eq_buf[eq->cons_idx] = eqe;
                eq->cons_idx = (eq->cons_idx + 1) % eq->eq_size;
                rc = update_eq_cons_idx(eq);
                if (rc) {
                        return -1;
                }
                *num_eqes = 1;
        } else {
                *num_eqes = 0;
        }
        return 0;
}
1666
/*
 * Unmap the device's memory-mapped regions (UAR, CR space and the
 * firmware error buffer).  Always returns 0.
 */
static int ib_device_close(void)
{
        iounmap(tavor_pci_dev.uar);
        iounmap(tavor_pci_dev.cr_space);
        iounmap(dev_ib_data.error_buf_addr);
        return 0;
}
1674
1675 static __u32 dev_get_qpn(void *qph)
1676 {
1677         struct udqp_st *qp = qph;
1678
1679         return qp->qpn;
1680 }
1681
/*
 * Ring a doorbell: write the two 32-bit words of the doorbell record to
 * the UAR page at 'offset', in big-endian order, with a barrier between
 * the two stores so the HCA sees the first word before the second.
 */
static void dev_post_dbell(void *dbell, __u32 offset)
{
        __u32 *ptr;
        unsigned long address;

        ptr = dbell;
        tprintf("ptr[0]= 0x%lx", ptr[0]);
        tprintf("ptr[1]= 0x%lx", ptr[1]);
        address = (unsigned long)(tavor_pci_dev.uar) + offset;
        tprintf("va=0x%lx pa=0x%lx", address,
                virt_to_bus((const void *)address));
        writel(htonl(ptr[0]), tavor_pci_dev.uar + offset);
        barrier();
        address += 4;
        tprintf("va=0x%lx pa=0x%lx", address,
                virt_to_bus((const void *)address));
        writel(htonl(ptr[1]), tavor_pci_dev.uar + offset + 4);
}
1700
1701