[image] Allow for zero embedded images
[people/asdlkf/gpxe.git] / src / drivers / net / mlx_ipoib / ib_mt25218.c
1 /*
2   This software is available to you under a choice of one of two
3   licenses.  You may choose to be licensed under the terms of the GNU
4   General Public License (GPL) Version 2, available at
5   <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
6   license, available in the LICENSE.TXT file accompanying this
7   software.  These details are also available at
8   <http://openib.org/license.html>.
9
10   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17   SOFTWARE.
18
19   Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
20 */
21
22 #include "mt25218.h"
23 #include "ib_driver.h"
24 #include <gpxe/pci.h>
25
26 #define MOD_INC(counter, max_count) (counter) = ((counter)+1) & ((max_count) - 1)
27
28 #define breakpoint {volatile __u32 *p=(__u32 *)0x1234;printf("breakpoint\n");do {} while((*p) != 0x1234);}
29
30 #define WRITE_BYTE_VOL(addr, off, val) \
31     do { \
32         (*((volatile __u8 *)(((volatile __u8 *)(addr)) + off))) = (val); \
33     } while(0)
34
35 #define WRITE_WORD_VOL(addr, off, val) \
36     do { \
37         (*((volatile __u16 *)(((volatile __u8 *)(addr)) + off))) = (val); \
38     } while(0)
39
40 #define WRITE_DWORD_VOL(addr, off, val) \
41     do { \
42         (*((volatile __u32 *)(((volatile __u8 *)(addr)) + off))) = (val); \
43     } while(0)
44
/* Every buffer shared between driver and HCA, gathered into one struct so
   that a single memory region (programmed in prep_sw2hw_mpt_buf()) can
   cover all of them. */
struct device_buffers_st {
	/* inprm and outprm do not have an alignment constraint since that
	   is achieved programmatically */
	u8 inprm_buf[INPRM_BUF_SZ];
	u8 outprm_buf[OUTPRM_BUF_SZ];
	union recv_wqe_u mads_qp_rcv_queue[NUM_MADS_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	union recv_wqe_u ipoib_qp_rcv_queue[NUM_IPOIB_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	union ud_send_wqe_u mads_qp_snd_queue[NUM_MADS_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	union ud_send_wqe_u ipoib_qp_snd_queue[NUM_IPOIB_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	struct eqe_t eq_buf[1 << LOG2_EQ_SZ]
	    __attribute__ ((aligned(sizeof(struct eqe_t))));
	union cqe_st mads_snd_cq_buf[NUM_MADS_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_snd_cq_buf[NUM_IPOIB_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st mads_rcv_cq_buf[NUM_MADS_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_rcv_cq_buf[NUM_IPOIB_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union ud_av_u av_array[NUM_AVS];
} __attribute__ ((packed));
70
71 #define STRUCT_ALIGN_SZ 4096
72 #define SRC_BUF_SZ (sizeof(struct device_buffers_st) + STRUCT_ALIGN_SZ - 1)
73
74 /* the following must be kept in this order
75    for the memory region to cover the buffers */
76 static u8 src_buf[SRC_BUF_SZ];
77 static struct ib_buffers_st ib_buffers;
78 static __u32 memreg_size;
79 /* end of order constraint */
80
81 struct phys_mem_desc_st {
82         unsigned long base;
83         unsigned long offset;
84 };
85
86 static struct phys_mem_desc_st phys_mem;
87
88 static struct dev_pci_struct memfree_pci_dev;
89 static struct device_buffers_st *dev_buffers_p;
90 static struct device_ib_data_st dev_ib_data;
91
92 static int gw_write_cr(__u32 addr, __u32 data)
93 {
94         writel(htonl(data), memfree_pci_dev.cr_space + addr);
95         return 0;
96 }
97
98 static int gw_read_cr(__u32 addr, __u32 * result)
99 {
100         *result = ntohl(readl(memfree_pci_dev.cr_space + addr));
101         return 0;
102 }
103
104 static int reset_hca(void)
105 {
106         return gw_write_cr(MEMFREE_RESET_OFFSET, 1);
107 }
108
/*
 * One-time PCI-level initialisation of the HCA.
 *
 * Records the six BAR start addresses, snapshots the first 256 bytes of
 * PCI config space (64 dwords, restored later by restore_config() after
 * the device reset), and maps the CR space (BAR 0) and this driver's UAR
 * page (BAR 2 + UAR_IDX * 4KB).
 *
 * Returns 0 on success; a pci_read_config_dword() error code or -1 on
 * ioremap() failure.
 */
static int ib_device_init(struct pci_device *dev)
{
	int i;
	int rc;

	tprintf("");

	memset(&dev_ib_data, 0, sizeof dev_ib_data);

	/* save bars */
	tprintf("bus=%d devfn=0x%x", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		memfree_pci_dev.dev.bar[i] =
		    pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		tprintf("bar[%d]= 0x%08lx", i, memfree_pci_dev.dev.bar[i]);
	}

	tprintf("");
	/* save config space (64 dwords) so it can be restored after reset */
	for (i = 0; i < 64; ++i) {
		rc = pci_read_config_dword(dev, i << 2,
					   &memfree_pci_dev.dev.
					   dev_config_space[i]);
		if (rc) {
			eprintf("");
			return rc;
		}
		tprintf("config[%d]= 0x%08lx", i << 2,
			memfree_pci_dev.dev.dev_config_space[i]);
	}

	tprintf("");
	memfree_pci_dev.dev.dev = dev;

	/* map cr-space: 1MB window starting at BAR 0 */
	memfree_pci_dev.cr_space =
	    ioremap(memfree_pci_dev.dev.bar[0], 0x100000);
	if (!memfree_pci_dev.cr_space) {
		eprintf("");
		return -1;
	}

	/* map uar: one 4KB page at index UAR_IDX inside BAR 2 */
	memfree_pci_dev.uar =
	    ioremap(memfree_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, 0x1000);
	if (!memfree_pci_dev.uar) {
		eprintf("");
		return -1;
	}
	tprintf("uar_base (pa:va) = 0x%lx 0x%lx",
		memfree_pci_dev.dev.bar[2] + UAR_IDX * 0x1000,
		memfree_pci_dev.uar);

	tprintf("");

	return 0;
}
166
/* Round buf up to the next multiple of align.  align must be a power of
   two (callers pass STRUCT_ALIGN_SZ == 4096). */
static inline unsigned long lalign(unsigned long buf, unsigned long align)
{
	unsigned long mask = align - 1;

	return (buf + mask) & ~mask;
}
172
/*
 * Carve the page-aligned device buffer area out of src_buf, compute the
 * length the memory region must cover, and derive the physical memory
 * base handed to the firmware for ICM pages.  Always returns 0.
 */
static int init_dev_data(void)
{
	unsigned long tmp;
	/* NOTE(review): 32MB region below the loaded image is claimed for
	   the firmware/ICM allocations driven by phys_mem -- confirm this
	   reservation against the platform memory map */
	unsigned long reserve_size = 32 * 1024 * 1024;

	tmp = lalign(virt_to_bus(src_buf), STRUCT_ALIGN_SZ);

	dev_buffers_p = bus_to_virt(tmp);
	/* memreg_size spans from the aligned buffer start up to the
	   memreg_size variable itself; this relies on the link-order
	   constraint documented at the definitions of src_buf/ib_buffers/
	   memreg_size above.  Casting pointers through __u32 assumes a
	   32-bit flat address space (gPXE i386). */
	memreg_size = (__u32) (&memreg_size) - (__u32) dev_buffers_p;
	tprintf("src_buf=0x%lx, dev_buffers_p=0x%lx, memreg_size=0x%x", src_buf,
		dev_buffers_p, memreg_size);

	tprintf("inprm: va=0x%lx, pa=0x%lx", dev_buffers_p->inprm_buf,
		virt_to_bus(dev_buffers_p->inprm_buf));
	tprintf("outprm: va=0x%lx, pa=0x%lx", dev_buffers_p->outprm_buf,
		virt_to_bus(dev_buffers_p->outprm_buf));

	/* base = image start minus reserve_size, rounded down to a
	   reserve_size boundary */
	phys_mem.base =
	    (virt_to_phys(_text) - reserve_size) & (~(reserve_size - 1));

	phys_mem.offset = 0;

	return 0;
}
197
198 static int restore_config(void)
199 {
200         int i;
201         int rc;
202
203         for (i = 0; i < 64; ++i) {
204                 if (i != 22 && i != 23) {
205                         rc = pci_write_config_dword(memfree_pci_dev.dev.dev,
206                                                     i << 2,
207                                                     memfree_pci_dev.dev.
208                                                     dev_config_space[i]);
209                         if (rc) {
210                                 return rc;
211                         }
212                 }
213         }
214         return 0;
215 }
216
/*
 * Serialise the INIT_HCA parameters from *init_hca_p into the command
 * mailbox at buf, section by section: QPC/EEC/CQC/EQC/RDB base addresses,
 * multicast parameters, TPT (MPT/MTT) parameters and UAR parameters.
 *
 * For the *_base_addr_l fields that are narrower than 32 bits, only the
 * top bits of the 32-bit address are stored, hence the
 * "shift = 32 - MT_BIT_SIZE(...)" right-shifts before each INS_FLD.
 */
static void prep_init_hca_buf(struct init_hca_st *init_hca_p, void *buf)
{
	unsigned long ptr;
	__u8 shift;

	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_init_hca_st));

	/* QPC/EEC/CQC/EQC/RDB parameter section */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st,
			   qpc_eec_cqc_eqc_rdb_parameters);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, qpc_base_addr_l);
	INS_FLD(init_hca_p->qpc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		qpc_base_addr_h);
	INS_FLD(init_hca_p->qpc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, qpc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_qp, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_qp);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, eec_base_addr_l);
	INS_FLD(init_hca_p->eec_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eec_base_addr_h);
	INS_FLD(init_hca_p->eec_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, eec_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_ee, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_ee);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, srqc_base_addr_l);
	INS_FLD(init_hca_p->srqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		srqc_base_addr_h);
	INS_FLD(init_hca_p->srqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, srqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_srq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_srq);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, cqc_base_addr_l);
	INS_FLD(init_hca_p->cqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		cqc_base_addr_h);
	INS_FLD(init_hca_p->cqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, cqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_cq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_cq);

	/* eqpc/eeec base addresses are stored unshifted (full 32 bits) */
	INS_FLD(init_hca_p->eqpc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eqpc_base_addr_h);
	INS_FLD(init_hca_p->eqpc_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		eqpc_base_addr_l);

	INS_FLD(init_hca_p->eeec_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eeec_base_addr_h);
	INS_FLD(init_hca_p->eeec_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		eeec_base_addr_l);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, eqc_base_addr_l);
	INS_FLD(init_hca_p->eqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eqc_base_addr_h);
	INS_FLD(init_hca_p->eqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, eqc_base_addr_l);
	/* note: source field is log_num_of_eq, PRM field is log_num_eq */
	INS_FLD(init_hca_p->log_num_of_eq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_eq);

	INS_FLD(init_hca_p->rdb_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		rdb_base_addr_h);
	INS_FLD(init_hca_p->rdb_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		rdb_base_addr_l);

	/* multicast parameter section */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, multicast_parameters);

	INS_FLD(init_hca_p->mc_base_addr_h, ptr, arbelprm_multicastparam_st,
		mc_base_addr_h);
	INS_FLD(init_hca_p->mc_base_addr_l, ptr, arbelprm_multicastparam_st,
		mc_base_addr_l);
	INS_FLD(init_hca_p->log_mc_table_entry_sz, ptr,
		arbelprm_multicastparam_st, log_mc_table_entry_sz);
	INS_FLD(init_hca_p->mc_table_hash_sz, ptr, arbelprm_multicastparam_st,
		mc_table_hash_sz);
	INS_FLD(init_hca_p->log_mc_table_sz, ptr, arbelprm_multicastparam_st,
		log_mc_table_sz);

	/* TPT (translation and protection tables) parameter section */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, tpt_parameters);

	INS_FLD(init_hca_p->mpt_base_addr_h, ptr, arbelprm_tptparams_st,
		mpt_base_adr_h);
	INS_FLD(init_hca_p->mpt_base_addr_l, ptr, arbelprm_tptparams_st,
		mpt_base_adr_l);
	INS_FLD(init_hca_p->log_mpt_sz, ptr, arbelprm_tptparams_st, log_mpt_sz);
	INS_FLD(init_hca_p->mtt_base_addr_h, ptr, arbelprm_tptparams_st,
		mtt_base_addr_h);
	INS_FLD(init_hca_p->mtt_base_addr_l, ptr, arbelprm_tptparams_st,
		mtt_base_addr_l);

	/* UAR parameter section */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, uar_parameters);

	INS_FLD(init_hca_p->log_max_uars, ptr, arbelprm_uar_params_st,
		log_max_uars);

}
317
/*
 * Fill the SW2HW_MPT mailbox for the driver's single memory region: key
 * mkey under the global PD, starting at the physical address of
 * dev_buffers_p and covering memreg_size bytes (see init_dev_data()).
 *
 * NOTE(review): unlike the other prep_* helpers this does not memset()
 * buf first -- presumably the caller hands in a cleared mailbox; verify.
 */
static void prep_sw2hw_mpt_buf(void *buf, __u32 mkey)
{
	/* lw/lr/pa/r_w: MPT flag bits from the Arbel PRM -- appear to
	   enable local write, local read, physical addressing and
	   region (vs window) mode; confirm against the PRM */
	INS_FLD(1, buf, arbelprm_mpt_st, lw);
	INS_FLD(1, buf, arbelprm_mpt_st, lr);
	INS_FLD(1, buf, arbelprm_mpt_st, pa);
	INS_FLD(1, buf, arbelprm_mpt_st, r_w);
	INS_FLD(mkey, buf, arbelprm_mpt_st, mem_key);
	INS_FLD(GLOBAL_PD, buf, arbelprm_mpt_st, pd);
	INS_FLD(virt_to_bus(dev_buffers_p), buf, arbelprm_mpt_st,
		start_address_l);
	INS_FLD(memreg_size, buf, arbelprm_mpt_st, reg_wnd_len_l);
}
330
/*
 * Fill the SW2HW_EQ mailbox for an event queue of 2^LOG2_EQ_SZ entries
 * located at eq_buf, owned by the global PD and addressed through the
 * driver's memory key.  Only the low 32 address bits are programmed.
 */
static void prep_sw2hw_eq_buf(void *buf, struct eqe_t *eq_buf)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_eqc_st));

	INS_FLD(0xa, buf, arbelprm_eqc_st, st); /* fired */
	INS_FLD(virt_to_bus(eq_buf), buf, arbelprm_eqc_st, start_address_l);
	INS_FLD(LOG2_EQ_SZ, buf, arbelprm_eqc_st, log_eq_size);
	INS_FLD(GLOBAL_PD, buf, arbelprm_eqc_st, pd);
	INS_FLD(dev_ib_data.mkey, buf, arbelprm_eqc_st, lkey);
}
341
342 static void init_eq_buf(void *eq_buf)
343 {
344         struct eqe_t *eq = eq_buf;
345         int i, num_eqes = 1 << LOG2_EQ_SZ;
346
347         memset(eq, 0, num_eqes * sizeof eq[0]);
348         for (i = 0; i < num_eqes; ++i)
349                 WRITE_BYTE_VOL(&eq[i], EQE_OWNER_OFFSET, EQE_OWNER_VAL_HW);
350 }
351
/*
 * Fill the INIT_IB mailbox used to bring the port up: 2048-byte MTU
 * capability, port width cap 3, one VL, one GID and a 64-entry pkey
 * table.
 */
static void prep_init_ib_buf(void *buf)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_init_ib_st));

	INS_FLD(MTU_2048, buf, arbelprm_init_ib_st, mtu_cap);
	INS_FLD(3, buf, arbelprm_init_ib_st, port_width_cap);
	INS_FLD(1, buf, arbelprm_init_ib_st, vl_cap);
	INS_FLD(1, buf, arbelprm_init_ib_st, max_gid);
	INS_FLD(64, buf, arbelprm_init_ib_st, max_pkey);
}
362
/*
 * Fill the SW2HW_CQ mailbox for completion queue cqn: 2^LOG2_CQ_SZ
 * entries at cq_buf, reporting to event queue eqn, using the driver's
 * UAR page and memory key, with the given consumer-index and state
 * doorbell record indices.
 */
static void prep_sw2hw_cq_buf(void *buf, __u8 eqn,
			      __u32 cqn,
			      union cqe_st *cq_buf,
			      __u32 cq_ci_db_record, __u32 cq_state_db_record)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_completion_queue_context_st));

	/* 0xA: same state value used for the EQ ("fired") above */
	INS_FLD(0xA, buf, arbelprm_completion_queue_context_st, st);
	INS_FLD(virt_to_bus(cq_buf), buf, arbelprm_completion_queue_context_st,
		start_address_l);
	INS_FLD(LOG2_CQ_SZ, buf, arbelprm_completion_queue_context_st,
		log_cq_size);
	INS_FLD(dev_ib_data.uar_idx, buf, arbelprm_completion_queue_context_st,
		usr_page);
	INS_FLD(eqn, buf, arbelprm_completion_queue_context_st, c_eqn);
	INS_FLD(GLOBAL_PD, buf, arbelprm_completion_queue_context_st, pd);
	INS_FLD(dev_ib_data.mkey, buf, arbelprm_completion_queue_context_st,
		l_key);
	INS_FLD(cqn, buf, arbelprm_completion_queue_context_st, cqn);
	INS_FLD(cq_ci_db_record, buf, arbelprm_completion_queue_context_st,
		cq_ci_db_record);
	INS_FLD(cq_state_db_record, buf, arbelprm_completion_queue_context_st,
		cq_state_db_record);
}
387
/*
 * Fill the RST2INIT_QPEE mailbox for an unreliable-datagram QP (TS_UD):
 * wire up send/receive CQs, WQE ring base addresses, doorbell record
 * indices, qkey and the primary port.  All *_size and *_stride arguments
 * are log2 values.
 */
static void prep_rst2init_qpee_buf(void *buf,
				   __u32 snd_cqn,
				   __u32 rcv_cqn,
				   __u32 qkey,
				   __u32 log_rq_size,
				   __u32 log_rq_stride,
				   __u32 log_sq_size,
				   __u32 log_sq_stride,
				   __u32 snd_wqe_base_adr_l,
				   __u32 snd_db_record_index,
				   __u32 rcv_wqe_base_adr_l,
				   __u32 rcv_db_record_index)
{
	void *tmp;
	int shift;
	struct qp_ee_state_tarnisition_st *prm = buf;

	memset(buf, 0, sizeof *prm);

	tprintf("snd_cqn=0x%lx", snd_cqn);
	tprintf("rcv_cqn=0x%lx", rcv_cqn);
	tprintf("qkey=0x%lx", qkey);
	tprintf("log_rq_size=0x%lx", log_rq_size);
	tprintf("log_rq_stride=0x%lx", log_rq_stride);
	tprintf("log_sq_size=0x%lx", log_sq_size);
	tprintf("log_sq_stride=0x%lx", log_sq_stride);
	tprintf("snd_wqe_base_adr_l=0x%lx", snd_wqe_base_adr_l);
	tprintf("snd_db_record_index=0x%lx", snd_db_record_index);
	tprintf("rcv_wqe_base_adr_l=0x%lx", rcv_wqe_base_adr_l);
	tprintf("rcv_db_record_index=0x%lx", rcv_db_record_index);

	/* main QP context */
	tmp = &prm->ctx;
	INS_FLD(TS_UD, tmp, arbelprm_queue_pair_ee_context_entry_st, st);
	INS_FLD(PM_STATE_MIGRATED, tmp, arbelprm_queue_pair_ee_context_entry_st,
		pm_state);
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, de);
	INS_FLD(MTU_2048, tmp, arbelprm_queue_pair_ee_context_entry_st, mtu);
	/* msg_max = 11 -- NOTE(review): presumably log2 of the maximum
	   message size (2KB); confirm against the PRM */
	INS_FLD(11, tmp, arbelprm_queue_pair_ee_context_entry_st, msg_max);
	INS_FLD(log_rq_size, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_rq_size);
	INS_FLD(log_rq_stride, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_rq_stride);
	INS_FLD(log_sq_size, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_sq_size);
	INS_FLD(log_sq_stride, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_sq_stride);
	INS_FLD(dev_ib_data.uar_idx, tmp,
		arbelprm_queue_pair_ee_context_entry_st, usr_page);
	INS_FLD(GLOBAL_PD, tmp, arbelprm_queue_pair_ee_context_entry_st, pd);
	INS_FLD(dev_ib_data.mkey, tmp, arbelprm_queue_pair_ee_context_entry_st,
		wqe_lkey);
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, ssc);
	INS_FLD(snd_cqn, tmp, arbelprm_queue_pair_ee_context_entry_st, cqn_snd);
	/* the *_wqe_base_adr_l fields hold only the top bits of the 32-bit
	   address, hence the right-shift by (32 - field width) */
	shift =
	    32 - MT_BIT_SIZE(arbelprm_queue_pair_ee_context_entry_st,
			     snd_wqe_base_adr_l);
	INS_FLD(snd_wqe_base_adr_l >> shift, tmp,
		arbelprm_queue_pair_ee_context_entry_st, snd_wqe_base_adr_l);
	INS_FLD(snd_db_record_index, tmp,
		arbelprm_queue_pair_ee_context_entry_st, snd_db_record_index);
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, rsc);
	INS_FLD(rcv_cqn, tmp, arbelprm_queue_pair_ee_context_entry_st, cqn_rcv);
	shift =
	    32 - MT_BIT_SIZE(arbelprm_queue_pair_ee_context_entry_st,
			     rcv_wqe_base_adr_l);
	INS_FLD(rcv_wqe_base_adr_l >> shift, tmp,
		arbelprm_queue_pair_ee_context_entry_st, rcv_wqe_base_adr_l);
	INS_FLD(rcv_db_record_index, tmp,
		arbelprm_queue_pair_ee_context_entry_st, rcv_db_record_index);
	INS_FLD(qkey, tmp, arbelprm_queue_pair_ee_context_entry_st, q_key);

	/* primary address path: only the port number is set */
	tmp =
	    (__u8 *) (&prm->ctx) +
	    MT_BYTE_OFFSET(arbelprm_queue_pair_ee_context_entry_st,
			   primary_address_path);
	INS_FLD(dev_ib_data.port, tmp, arbelprm_address_path_st, port_number);

}
466
467 static void prep_init2rtr_qpee_buf(void *buf)
468 {
469         struct qp_ee_state_tarnisition_st *prm;
470
471         prm = (struct qp_ee_state_tarnisition_st *)buf;
472
473         memset(prm, 0, sizeof *prm);
474
475         INS_FLD(MTU_2048, &prm->ctx, arbelprm_queue_pair_ee_context_entry_st,
476                 mtu);
477         INS_FLD(11, &prm->ctx, arbelprm_queue_pair_ee_context_entry_st,
478                 msg_max);
479 }
480
/* Intentionally empty: address-vector array initialisation is a no-op on
   this (memfree) device. */
static void init_av_array(void)
{
}
484
/*
 * my_log2()
 *
 * Return ceil(log2(arg)): the smallest i such that (1 << i) >= arg.
 * Returns INT_MIN for arg == 0 (log2(0) = -infinity).
 */
static int my_log2(unsigned long arg)
{
	int i;
	unsigned long tmp;

	if (arg == 0) {
		return INT_MIN;	/* log2(0) = -infinity */
	}

	/* ceil(log2(arg)) is the bit length of (arg - 1).  Counting by
	   shifting down always terminates, unlike the previous approach
	   of shifting a 32-bit 1 upward until it reached arg, which spun
	   forever once arg exceeded 2^31 (the probe wrapped to 0). */
	i = 0;
	for (tmp = arg - 1; tmp != 0; tmp >>= 1) {
		++i;
	}

	return i;
}
506
/*
 * get_req_icm_pages()
 *
 * Compute how many 4KB ICM pages are needed for a table holding the
 * reserved entries (2^log2_reserved) plus app_rsrc driver entries of
 * entry_size bytes each, rounded up to a power-of-two entry count.
 * The rounded log2 entry count is returned through *log2_entries_p.
 */
static unsigned long get_req_icm_pages(unsigned long log2_reserved,
				       unsigned long app_rsrc,
				       unsigned long entry_size,
				       unsigned long *log2_entries_p)
{
	unsigned long log2_total;
	unsigned long byte_count;

	log2_total = my_log2((1 << log2_reserved) + app_rsrc);
	*log2_entries_p = log2_total;

	/* table size in bytes, then round up to whole 4KB pages */
	byte_count = (1 << log2_total) * entry_size;
	return (byte_count + 4095) >> 12;
}
524
/*
 * Build the software-owned UAR context page: one 8-byte doorbell record
 * per resource, located at (index * 8) within the page.  Records are
 * written for CQ arm, QP send/receive doorbells and CQ set-ci, for both
 * the MADs QP and the IPoIB QP, plus a group-separator record.
 */
static void init_uar_context(void *uar_context_va)
{
	void *ptr;
	/* clear all uar context */
	memset(uar_context_va, 0, 4096);

	/* CQ arm records */
	ptr = uar_context_va + MADS_RCV_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);

	ptr = uar_context_va + MADS_SND_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);

	ptr = uar_context_va + IPOIB_RCV_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);

	ptr = uar_context_va + IPOIB_SND_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);

	/* send-queue doorbell records */
	ptr = uar_context_va + MADS_SND_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_SQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);

	ptr = uar_context_va + IPOIB_SND_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_SQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);

	/* group separator record */
	ptr = uar_context_va + GROUP_SEP_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_GROUP_SEP, ptr, arbelprm_cq_arm_db_record_st,
		      res);

	/* receive-queue doorbell records */
	ptr = uar_context_va + MADS_RCV_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_RQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);

	ptr = uar_context_va + IPOIB_RCV_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_RQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);

	/* CQ set-ci (consumer index) records */
	ptr = uar_context_va + MADS_RCV_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);

	ptr = uar_context_va + MADS_SND_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);

	ptr = uar_context_va + IPOIB_RCV_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);

	ptr = uar_context_va + IPOIB_SND_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);

}
596
597 static int setup_hca(__u8 port, void **eq_p)
598 {
599         int ret;
600         int rc;
601         struct query_fw_st qfw;
602         struct map_icm_st map_obj;
603         struct dev_lim_st dev_lim;
604         struct init_hca_st init_hca;
605         __u8 log2_pages;
606         unsigned long icm_start, icm_size, tmp;
607         unsigned long log2_entries;
608         __u32 aux_pages;
609         __u32 mem_key, key, tmp_key;
610         __u8 eqn;
611         __u32 event_mask;
612         struct eqe_t *eq_buf;
613         void *inprm;
614         unsigned long bus_addr;
615         struct query_adapter_st qa;
616         __u8 log_max_uars = 1;
617         void *uar_context_va;
618         __u32 uar_context_pa;
619
620         tprintf("called");
621         init_dev_data();
622         inprm = get_inprm_buf();
623
624         rc = reset_hca();
625         if (rc) {
626                 eprintf("");
627                 return rc;
628         } else {
629                 tprintf("reset_hca() success");
630         }
631
632         mdelay(1000);           /* wait for 1 sec */
633
634         rc = restore_config();
635         if (rc) {
636                 eprintf("");
637                 return rc;
638         } else {
639                 tprintf("restore_config() success");
640         }
641
642         dev_ib_data.pd = GLOBAL_PD;
643         dev_ib_data.port = port;
644         dev_ib_data.qkey = GLOBAL_QKEY;
645
646         rc = cmd_query_fw(&qfw);
647         if (rc) {
648                 eprintf("");
649                 return rc;
650         }
651         else {
652                 tprintf("cmd_query_fw() success");
653
654                 if (print_info) {
655                         printf("FW ver = %d.%d.%d\n",
656                         qfw.fw_rev_major,
657                         qfw.fw_rev_minor,
658                         qfw.fw_rev_subminor);
659                 }
660
661                 tprintf("fw_rev_major=%d", qfw.fw_rev_major);
662                 tprintf("fw_rev_minor=%d", qfw.fw_rev_minor);
663                 tprintf("fw_rev_subminor=%d", qfw.fw_rev_subminor);
664                 tprintf("error_buf_start_h=0x%x", qfw.error_buf_start_h);
665                 tprintf("error_buf_start_l=0x%x", qfw.error_buf_start_l);
666                 tprintf("error_buf_size=%d", qfw.error_buf_size);
667         }
668
669
670
671         bus_addr =
672             ((unsigned long)((u64) qfw.error_buf_start_h << 32) | qfw.
673              error_buf_start_l);
674     dev_ib_data.error_buf_addr= ioremap(bus_addr,
675                                                                                 qfw.error_buf_size*4);
676         dev_ib_data.error_buf_size= qfw.error_buf_size;
677         if (!dev_ib_data.error_buf_addr) {
678                 eprintf("");
679                 return -1;
680         }
681
682
683         bus_addr =
684             ((unsigned long)((u64) qfw.clear_int_addr.addr_h << 32) | qfw.
685              clear_int_addr.addr_l);
686         dev_ib_data.clr_int_addr = bus_to_virt(bus_addr);
687
688         rc = cmd_enable_lam();
689         if (rc == 0x22 /* LAM_NOT_PRE -- need to put a name here */ ) {
690                 // ??????
691         } else if (rc == 0) {
692                 // ??????
693         } else {
694                 eprintf("");
695                 return rc;
696         }
697
698         log2_pages = my_log2(qfw.fw_pages);
699
700         memset(&map_obj, 0, sizeof map_obj);
701         map_obj.num_vpm = 1;
702         map_obj.vpm_arr[0].log2_size = log2_pages;
703         map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
704         rc = cmd_map_fa(&map_obj);
705         if (rc) {
706                 eprintf("");
707                 return rc;
708         }
709         phys_mem.offset += 1 << (log2_pages + 12);
710
711         rc = cmd_run_fw();
712         if (rc) {
713                 ret = -1;
714                 eprintf("");
715                 goto undo_map_fa;
716         }
717
718         rc = cmd_mod_stat_cfg();
719         if (rc) {
720                 ret = -1;
721                 eprintf("");
722                 goto undo_map_fa;
723         }
724
725         rc = cmd_query_dev_lim(&dev_lim);
726         if (rc) {
727                 ret = -1;
728                 eprintf("");
729                 goto undo_map_fa;
730         }
731
732         dev_ib_data.uar_idx = dev_lim.num_rsvd_uars;
733
734         tprintf("max_icm_size_h=0x%lx", dev_lim.max_icm_size_h);
735         tprintf("max_icm_size_l=0x%lx", dev_lim.max_icm_size_l);
736
737         memset(&init_hca, 0, sizeof init_hca);
738         icm_start = 0;
739         icm_size = 0;
740
741         icm_start += ((dev_lim.num_rsvd_uars + 1) << 12);
742         icm_size += ((dev_lim.num_rsvd_uars + 1) << 12);
743
744         tmp = get_req_icm_pages(dev_lim.log2_rsvd_qps,
745                                 MAX_APP_QPS,
746                                 dev_lim.qpc_entry_sz, &log2_entries);
747         init_hca.qpc_base_addr_l = icm_start;
748         init_hca.log_num_of_qp = log2_entries;
749         icm_start += (tmp << 12);
750         icm_size += (tmp << 12);
751
752         init_hca.eqpc_base_addr_l = icm_start;
753         icm_start += (tmp << 12);
754         icm_size += (tmp << 12);
755
756         tmp = get_req_icm_pages(dev_lim.log2_rsvd_srqs,
757                                 0, dev_lim.srq_entry_sz, &log2_entries);
758         init_hca.srqc_base_addr_l = icm_start;
759         init_hca.log_num_of_srq = log2_entries;
760         icm_start += (tmp << 12);
761         icm_size += (tmp << 12);
762
763         tmp = get_req_icm_pages(dev_lim.log2_rsvd_ees,
764                                 0, dev_lim.eec_entry_sz, &log2_entries);
765         init_hca.eec_base_addr_l = icm_start;
766         init_hca.log_num_of_ee = log2_entries;
767         icm_start += (tmp << 12);
768         icm_size += (tmp << 12);
769
770         init_hca.eeec_base_addr_l = icm_start;
771         icm_start += (tmp << 12);
772         icm_size += (tmp << 12);
773
774         tmp = get_req_icm_pages(dev_lim.log2_rsvd_cqs,
775                                 MAX_APP_CQS,
776                                 dev_lim.cqc_entry_sz, &log2_entries);
777         init_hca.cqc_base_addr_l = icm_start;
778         init_hca.log_num_of_cq = log2_entries;
779         icm_start += (tmp << 12);
780         icm_size += (tmp << 12);
781
782         tmp = get_req_icm_pages(dev_lim.log2_rsvd_mtts,
783                                 0, dev_lim.mtt_entry_sz, &log2_entries);
784         init_hca.mtt_base_addr_l = icm_start;
785         icm_start += (tmp << 12);
786         icm_size += (tmp << 12);
787
788         tmp = get_req_icm_pages(dev_lim.log2_rsvd_mrws,
789                                 1, dev_lim.mpt_entry_sz, &log2_entries);
790         init_hca.mpt_base_addr_l = icm_start;
791         init_hca.log_mpt_sz = log2_entries;
792         icm_start += (tmp << 12);
793         icm_size += (tmp << 12);
794
795         tmp = get_req_icm_pages(dev_lim.log2_rsvd_rdbs, 1, 32,  /* size of rdb entry */
796                                 &log2_entries);
797         init_hca.rdb_base_addr_l = icm_start;
798         icm_start += (tmp << 12);
799         icm_size += (tmp << 12);
800
801         init_hca.eqc_base_addr_l = icm_start;
802         init_hca.log_num_of_eq = LOG2_EQS;
803         tmp = dev_lim.eqc_entry_sz * (1 << LOG2_EQS);
804         icm_start += tmp;
805         icm_size += tmp;
806
807         init_hca.mc_base_addr_l = icm_start;
808         init_hca.log_mc_table_entry_sz =
809             my_log2(MT_STRUCT_SIZE(arbelprm_mgm_entry_st));
810         init_hca.mc_table_hash_sz = 8;
811         init_hca.log_mc_table_sz = 3;
812         icm_size +=
813             (MT_STRUCT_SIZE(arbelprm_mgm_entry_st) * init_hca.mc_table_hash_sz);
814         icm_start +=
815             (MT_STRUCT_SIZE(arbelprm_mgm_entry_st) * init_hca.mc_table_hash_sz);
816
817         rc = cmd_set_icm_size(icm_size, &aux_pages);
818         if (rc) {
819                 ret = -1;
820                 eprintf("");
821                 goto undo_map_fa;
822         }
823
824         memset(&map_obj, 0, sizeof map_obj);
825         map_obj.num_vpm = 1;
826         map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
827         map_obj.vpm_arr[0].log2_size = my_log2(aux_pages);
828         rc = cmd_map_icm_aux(&map_obj);
829         if (rc) {
830                 ret = -1;
831                 eprintf("");
832                 goto undo_map_fa;
833         }
834         phys_mem.offset += (1 << (map_obj.vpm_arr[0].log2_size + 12));
835
836         uar_context_pa = phys_mem.base + phys_mem.offset +
837             dev_ib_data.uar_idx * 4096;
838         uar_context_va = phys_to_virt(uar_context_pa);
839         tprintf("uar_context: va=0x%lx, pa=0x%lx", uar_context_va,
840                 uar_context_pa);
841         dev_ib_data.uar_context_base = uar_context_va;
842
843         memset(&map_obj, 0, sizeof map_obj);
844         map_obj.num_vpm = 1;
845         map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
846         map_obj.vpm_arr[0].log2_size = my_log2((icm_size + 4095) >> 12);
847         rc = cmd_map_icm(&map_obj);
848         if (rc) {
849                 ret = -1;
850                 eprintf("");
851                 goto undo_map_fa;
852         }
853         phys_mem.offset += (1 << (map_obj.vpm_arr[0].log2_size + 12));
854
855         init_hca.log_max_uars = log_max_uars;
856         tprintf("inprm: va=0x%lx, pa=0x%lx", inprm, virt_to_bus(inprm));
857         prep_init_hca_buf(&init_hca, inprm);
858         rc = cmd_init_hca(inprm, MT_STRUCT_SIZE(arbelprm_init_hca_st));
859         if (rc) {
860                 ret = -1;
861                 eprintf("");
862                 goto undo_map_fa;
863         }
864
865         rc = cmd_query_adapter(&qa);
866         if (rc) {
867                 eprintf("");
868                 return rc;
869         }
870         dev_ib_data.clr_int_data = 1 << qa.intapin;
871
872         tmp_key = 1 << dev_lim.log2_rsvd_mrws | MKEY_PREFIX;
873         mem_key = 1 << (dev_lim.log2_rsvd_mrws + 8) | (MKEY_PREFIX >> 24);
874         prep_sw2hw_mpt_buf(inprm, tmp_key);
875         rc = cmd_sw2hw_mpt(&key, 1 << dev_lim.log2_rsvd_mrws, inprm,
876                            SW2HW_MPT_IBUF_SZ);
877         if (rc) {
878                 ret = -1;
879                 eprintf("");
880                 goto undo_map_fa;
881         } else {
882                 tprintf("cmd_sw2hw_mpt() success, key=0x%lx", mem_key);
883         }
884         dev_ib_data.mkey = mem_key;
885
886         eqn = EQN;
887         /* allocate a single EQ which will receive 
888            all the events */
889         eq_buf = dev_buffers_p->eq_buf;
890         init_eq_buf(eq_buf);    /* put in HW ownership */
891         prep_sw2hw_eq_buf(inprm, eq_buf);
892         rc = cmd_sw2hw_eq(SW2HW_EQ_IBUF_SZ);
893         if (rc) {
894                 ret = -1;
895                 eprintf("");
896                 goto undo_sw2hw_mpt;
897         } else
898                 tprintf("cmd_sw2hw_eq() success");
899
900         event_mask = (1 << XDEV_EV_TYPE_CQ_COMP) |
901             (1 << XDEV_EV_TYPE_CQ_ERR) |
902             (1 << XDEV_EV_TYPE_LOCAL_WQ_CATAS_ERR) |
903             (1 << XDEV_EV_TYPE_PORT_ERR) |
904             (1 << XDEV_EV_TYPE_LOCAL_WQ_INVALID_REQ_ERR) |
905             (1 << XDEV_EV_TYPE_LOCAL_WQ_ACCESS_VIOL_ERR) |
906             (1 << TAVOR_IF_EV_TYPE_OVERRUN);
907         rc = cmd_map_eq(eqn, event_mask, 1);
908         if (rc) {
909                 ret = -1;
910                 eprintf("");
911                 goto undo_sw2hw_eq;
912         } else
913                 tprintf("cmd_map_eq() success");
914
915         dev_ib_data.eq.eqn = eqn;
916         dev_ib_data.eq.eq_buf = eq_buf;
917         dev_ib_data.eq.cons_counter = 0;
918         dev_ib_data.eq.eq_size = 1 << LOG2_EQ_SZ;
919         bus_addr =
920             ((unsigned long)((u64) qfw.eq_ci_table.addr_h << 32) | qfw.
921              eq_ci_table.addr_l)
922             + eqn * 8;
923         dev_ib_data.eq.ci_base_base_addr = bus_to_virt(bus_addr);
924         *eq_p = &dev_ib_data.eq;
925
926         prep_init_ib_buf(inprm);
927         rc = cmd_init_ib(port, inprm, INIT_IB_IBUF_SZ);
928         if (rc) {
929                 ret = -1;
930                 eprintf("");
931                 goto undo_sw2hw_eq;
932         } else
933                 tprintf("cmd_init_ib() success");
934
935         init_av_array();
936         tprintf("init_av_array() done");
937
938         /* set the qp and cq numbers according
939            to the results of query_dev_lim */
940         dev_ib_data.mads_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
941             +QPN_BASE + MADS_QPN_SN;
942         dev_ib_data.ipoib_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
943             +QPN_BASE + IPOIB_QPN_SN;
944
945         dev_ib_data.mads_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
946             MADS_SND_CQN_SN;
947         dev_ib_data.mads_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
948             MADS_RCV_CQN_SN;
949
950         dev_ib_data.ipoib_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
951             IPOIB_SND_CQN_SN;
952         dev_ib_data.ipoib_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
953             IPOIB_RCV_CQN_SN;
954
955         init_uar_context(uar_context_va);
956
957         ret = 0;
958         goto exit;
959
960       undo_sw2hw_eq:
961         rc = cmd_hw2sw_eq(eqn);
962         if (rc)
963                 eprintf("");
964         else
965                 tprintf("cmd_hw2sw_eq() success");
966
967       undo_sw2hw_mpt:
968         rc = cmd_hw2sw_mpt(tmp_key);
969         if (rc)
970                 eprintf("");
971
972       undo_map_fa:
973         rc = cmd_unmap_fa();
974         if (rc)
975                 eprintf("");
976
977       exit:
978         return ret;
979 }
980
981 static void *get_inprm_buf(void)
982 {
983         return dev_buffers_p->inprm_buf;
984 }
985
986 static void *get_outprm_buf(void)
987 {
988         return dev_buffers_p->outprm_buf;
989 }
990
991 static void *get_send_wqe_buf(void *wqe, __u8 index)
992 {
993         struct ud_send_wqe_st *snd_wqe = wqe;
994
995         return bus_to_virt(be32_to_cpu(snd_wqe->mpointer[index].local_addr_l));
996 }
997
998 static void *get_rcv_wqe_buf(void *wqe, __u8 index)
999 {
1000         struct recv_wqe_st *rcv_wqe = wqe;
1001
1002         return bus_to_virt(be32_to_cpu(rcv_wqe->mpointer[index].local_addr_l));
1003 }
1004
/*
 * (Re)initialize a UD address vector for subsequent sends.
 *
 * av   - address vector object to fill; its hardware portion is zeroed first.
 * dlid - destination LID.
 * g    - GRH flag; non-zero means send with a global routing header.
 * sl   - service level.
 * rate - static rate selector: >= 3 selects encoding 0, otherwise 1.
 * gid  - destination GID; may be NULL (zero GID is used when g is set).
 * qpn  - destination QP number, stored alongside the qkey for the sender.
 */
static void modify_av_params(struct ud_av_st *av,
			     __u16 dlid,
			     __u8 g,
			     __u8 sl, __u8 rate, union ib_gid_u *gid, __u32 qpn)
{
	memset(&av->av, 0, sizeof av->av);

	INS_FLD_TO_BE(dev_ib_data.port, &av->av, arbelprm_ud_address_vector_st,
		      port_number);
	INS_FLD_TO_BE(dev_ib_data.pd, &av->av, arbelprm_ud_address_vector_st,
		      pd);
	INS_FLD_TO_BE(dlid, &av->av, arbelprm_ud_address_vector_st, rlid);
	INS_FLD_TO_BE(g, &av->av, arbelprm_ud_address_vector_st, g);
	INS_FLD_TO_BE(sl, &av->av, arbelprm_ud_address_vector_st, sl);
	/* msg field encoding 3 — NOTE(review): presumably the max message
	   size selector; confirm the exact meaning against the Arbel PRM */
	INS_FLD_TO_BE(3, &av->av, arbelprm_ud_address_vector_st, msg);

	if (rate >= 3)
		INS_FLD_TO_BE(0, &av->av, arbelprm_ud_address_vector_st, max_stat_rate);	/* 4x */
	else
		INS_FLD_TO_BE(1, &av->av, arbelprm_ud_address_vector_st, max_stat_rate);	/* 1x */

	if (g) {
		/* GID bytes are copied verbatim (INS_FLD, not INS_FLD_TO_BE);
		   the raw[] byte array is taken as already being in the
		   on-the-wire order */
		if (gid) {
			INS_FLD(*((__u32 *) (&gid->raw[0])), &av->av,
				arbelprm_ud_address_vector_st, rgid_127_96);
			INS_FLD(*((__u32 *) (&gid->raw[4])), &av->av,
				arbelprm_ud_address_vector_st, rgid_95_64);
			INS_FLD(*((__u32 *) (&gid->raw[8])), &av->av,
				arbelprm_ud_address_vector_st, rgid_63_32);
			INS_FLD(*((__u32 *) (&gid->raw[12])), &av->av,
				arbelprm_ud_address_vector_st, rgid_31_0);
		} else {
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_127_96);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_95_64);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_63_32);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_31_0);
		}
	} else {
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_127_96);
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_95_64);
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_63_32);
		/* NOTE(review): low rgid dword is set to 2 when no GRH is
		   used — looks like a hardware requirement; confirm against
		   the Arbel PRM before changing */
		INS_FLD(2, &av->av, arbelprm_ud_address_vector_st, rgid_31_0);
	}
	av->dest_qp = qpn;
	av->qkey = dev_ib_data.qkey;
}
1055
1056 static void init_cq_buf(union cqe_st *cq_buf, __u8 num_cqes)
1057 {
1058         int i;
1059
1060         memset(cq_buf, 0, sizeof(union cqe_st) * num_cqes);
1061         for (i = 0; i < num_cqes; ++i) {
1062                 WRITE_BYTE_VOL(&cq_buf[i], CQE_OWNER_OFFSET, CQE_OWNER_VAL_HW);
1063         }
1064 }
1065
1066 static int post_rcv_buf(struct udqp_st *qp, struct recv_wqe_st *rcv_wqe)
1067 {
1068         int i;
1069
1070         /* put a valid lkey */
1071         for (i = 0; i < MAX_SCATTER; ++i) {
1072                 rcv_wqe->mpointer[i].lkey = cpu_to_be32(dev_ib_data.mkey);
1073         }
1074
1075         qp->post_rcv_counter++;
1076         WRITE_WORD_VOL(qp->rcv_uar_context, 2, htons(qp->post_rcv_counter));
1077
1078         return 0;
1079 }
1080
/*
 * Post a previously prepared UD send WQE and ring the send doorbell.
 *
 * qph        - QP handle (struct udqp_st *).
 * wqeh       - send WQE handle (struct ud_send_wqe_st *).
 * num_gather - number of gather pointers used by the WQE.
 *
 * Returns the status of cmd_post_doorbell(); on success the WQE is
 * remembered as the last posted one so the next post can chain to it.
 */
static int post_send_req(void *qph, void *wqeh, __u8 num_gather)
{
	int rc;
	struct udqp_st *qp = qph;
	struct ud_send_wqe_st *snd_wqe = wqeh;
	struct send_doorbell_st dbell;
	__u32 nds;

	/* advance the send counter and publish it in the QP's doorbell
	   record (counter word at offset 2 of the UAR context entry) */
	qp->post_send_counter++;

	WRITE_WORD_VOL(qp->send_uar_context, 2, htons(qp->post_send_counter));

	memset(&dbell, 0, sizeof dbell);
	INS_FLD(XDEV_NOPCODE_SEND, &dbell, arbelprm_send_doorbell_st, nopcode);
	INS_FLD(1, &dbell, arbelprm_send_doorbell_st, f);
	INS_FLD(qp->post_send_counter - 1, &dbell, arbelprm_send_doorbell_st,
		wqe_counter);
	INS_FLD(1, &dbell, arbelprm_send_doorbell_st, wqe_cnt);
	/* WQE size in 16-byte units: next segment + UD segment + one
	   gather pointer per entry actually used */
	nds = (sizeof(snd_wqe->next) +
	       sizeof(snd_wqe->udseg) +
	       sizeof(snd_wqe->mpointer[0]) * num_gather) >> 4;
	INS_FLD(nds, &dbell, arbelprm_send_doorbell_st, nds);
	INS_FLD(qp->qpn, &dbell, arbelprm_send_doorbell_st, qpn);

	/* patch the previously posted WQE's next segment so hardware can
	   chain from it into this WQE */
	if (qp->last_posted_snd_wqe) {
		INS_FLD_TO_BE(nds,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, nds);
		INS_FLD_TO_BE(1,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, f);
		INS_FLD_TO_BE(XDEV_NOPCODE_SEND,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, nopcode);
	}

	rc = cmd_post_doorbell(&dbell, POST_SND_OFFSET);
	if (!rc) {
		qp->last_posted_snd_wqe = snd_wqe;
	}

	return rc;

}
1125
1126 static int create_mads_qp(void **qp_pp, void **snd_cq_pp, void **rcv_cq_pp)
1127 {
1128         __u8 i, next_i, j, k;
1129         int rc;
1130         struct udqp_st *qp;
1131         __u32 bus_addr;
1132         __u8 nds;
1133         void *ptr;
1134
1135         qp = &dev_ib_data.mads_qp;
1136
1137         /* set the pointer to the receive WQEs buffer */
1138         qp->rcv_wq = dev_buffers_p->mads_qp_rcv_queue;
1139
1140         qp->send_buf_sz = MAD_BUF_SZ;
1141         qp->rcv_buf_sz = MAD_BUF_SZ;
1142
1143         qp->max_recv_wqes = NUM_MADS_RCV_WQES;  /* max wqes in this work queue */
1144         qp->recv_wqe_cur_free = NUM_MADS_RCV_WQES;      /* current free wqes */
1145         qp->recv_wqe_alloc_idx = 0;     /* index from wqes can be allocated if there are free wqes */
1146
1147         qp->rcv_uar_context =
1148             dev_ib_data.uar_context_base + 8 * MADS_RCV_QP_DB_IDX;
1149         qp->send_uar_context =
1150             dev_ib_data.uar_context_base + 8 * MADS_SND_QP_DB_IDX;
1151
1152         memset(&qp->rcv_wq[0], 0, NUM_MADS_RCV_WQES * sizeof(qp->rcv_wq[0]));
1153         nds = sizeof(qp->rcv_wq[0].wqe) >> 4;
1154         /* iterrate through the list */
1155         for (j = 0, i = 0, next_i = 1;
1156              j < NUM_MADS_RCV_WQES;
1157              MOD_INC(i, NUM_MADS_RCV_WQES), MOD_INC(next_i, NUM_MADS_RCV_WQES),
1158              ++j) {
1159
1160                 qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
1161                 /* link the WQE to the next one */
1162                 bus_addr = virt_to_bus(&qp->rcv_wq[next_i].wqe);
1163                 ptr = qp->rcv_wq[i].wqe.control +
1164                     MT_BYTE_OFFSET(arbelprm_wqe_segment_ctrl_recv_st,
1165                                    wqe_segment_next);
1166                 INS_FLD(bus_addr >> 6, ptr, arbelprm_recv_wqe_segment_next_st,
1167                         nda_31_6);
1168                 INS_FLD(nds, ptr, arbelprm_recv_wqe_segment_next_st, nds);
1169
1170                 /* set the allocated buffers */
1171                 qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
1172                 bus_addr = virt_to_bus(qp->rcv_bufs[i]);
1173                 qp->rcv_wq[i].wqe.mpointer[0].local_addr_l = bus_addr;
1174                 qp->rcv_wq[i].wqe.mpointer[0].byte_count = GRH_SIZE;
1175                 bus_addr = virt_to_bus(qp->rcv_bufs[i] + GRH_SIZE);
1176                 qp->rcv_wq[i].wqe.mpointer[1].local_addr_l = bus_addr;
1177                 qp->rcv_wq[i].wqe.mpointer[1].byte_count = MAD_BUF_SZ;
1178
1179                 for (k = 0; k < (((sizeof(qp->rcv_wq[i])) >> 4) - 1); ++k) {
1180                         qp->rcv_wq[i].wqe.mpointer[k].lkey = INVALID_WQE_LKEY;
1181                 }
1182         }
1183         cpu_to_be_buf(&qp->rcv_wq[0],
1184                       NUM_MADS_RCV_WQES * sizeof(qp->rcv_wq[0]));
1185
1186         for (i = 0; i < qp->max_recv_wqes; ++i) {
1187                 qp->rcv_wq[i].wqe_cont.qp = qp;
1188         }
1189
1190         /* set the pointer to the send WQEs buffer */
1191         qp->snd_wq = dev_buffers_p->mads_qp_snd_queue;
1192
1193         qp->snd_wqe_alloc_idx = 0;
1194         qp->max_snd_wqes = NUM_MADS_SND_WQES;
1195         qp->snd_wqe_cur_free = NUM_MADS_SND_WQES;
1196
1197         memset(&qp->snd_wq[0], 0, NUM_MADS_SND_WQES * sizeof(qp->snd_wq[i]));
1198         /* iterrate through the list */
1199         for (j = 0, i = 0, next_i = 1;
1200              j < NUM_MADS_RCV_WQES;
1201              MOD_INC(i, NUM_MADS_SND_WQES), MOD_INC(next_i, NUM_MADS_SND_WQES),
1202              ++j) {
1203
1204                 /* link the WQE to the next one */
1205                 bus_addr = virt_to_bus(&qp->snd_wq[next_i].wqe_cont.wqe);
1206                 INS_FLD(bus_addr >> 6, &qp->snd_wq[i].wqe_cont.wqe.next.next,
1207                         arbelprm_wqe_segment_next_st, nda_31_6);
1208
1209                 /* set the allocated buffers */
1210                 qp->snd_bufs[i] = ib_buffers.send_mad_buf[i];
1211                 bus_addr = virt_to_bus(qp->snd_bufs[i]);
1212                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].local_addr_l = bus_addr;
1213                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].lkey = dev_ib_data.mkey;
1214                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].byte_count =
1215                     qp->send_buf_sz;
1216
1217         }
1218
1219         cpu_to_be_buf(&qp->snd_wq[0],
1220                       NUM_MADS_SND_WQES * sizeof(qp->snd_wq[i]));
1221
1222         for (i = 0; i < qp->max_snd_wqes; ++i) {
1223                 qp->snd_wq[i].wqe_cont.qp = qp;
1224         }
1225
1226         /* qp number and cq numbers are already set up */
1227         qp->snd_cq.cq_buf = dev_buffers_p->mads_snd_cq_buf;
1228         qp->rcv_cq.cq_buf = dev_buffers_p->mads_rcv_cq_buf;
1229         qp->snd_cq.num_cqes = NUM_MADS_SND_CQES;
1230         qp->rcv_cq.num_cqes = NUM_MADS_RCV_CQES;
1231         qp->snd_cq.arm_db_ctx_idx = MADS_SND_CQ_ARM_DB_IDX;
1232         qp->snd_cq.ci_db_ctx_idx = MADS_SND_CQ_CI_DB_IDX;
1233         qp->rcv_cq.arm_db_ctx_idx = MADS_RCV_CQ_ARM_DB_IDX;
1234         qp->rcv_cq.ci_db_ctx_idx = MADS_RCV_CQ_CI_DB_IDX;
1235         qp->rcv_db_record_index = MADS_RCV_QP_DB_IDX;
1236         qp->snd_db_record_index = MADS_SND_QP_DB_IDX;
1237         qp->qkey = GLOBAL_QKEY;
1238         rc = create_udqp(qp);
1239         if (!rc) {
1240                 *qp_pp = qp;
1241                 *snd_cq_pp = &qp->snd_cq;
1242                 *rcv_cq_pp = &qp->rcv_cq;
1243         }
1244
1245         return rc;
1246 }
1247
1248 static int create_ipoib_qp(void **qp_pp,
1249                            void **snd_cq_pp, void **rcv_cq_pp, __u32 qkey)
1250 {
1251         __u8 i, next_i, j, k;
1252         int rc;
1253         struct udqp_st *qp;
1254         __u32 bus_addr;
1255         __u8 nds;
1256         void *ptr;
1257
1258         qp = &dev_ib_data.ipoib_qp;
1259
1260         /* set the pointer to the receive WQEs buffer */
1261         qp->rcv_wq = dev_buffers_p->ipoib_qp_rcv_queue;
1262
1263         qp->send_buf_sz = IPOIB_SND_BUF_SZ;
1264         qp->rcv_buf_sz = IPOIB_RCV_BUF_SZ;
1265
1266         qp->max_recv_wqes = NUM_IPOIB_RCV_WQES;
1267         qp->recv_wqe_cur_free = NUM_IPOIB_RCV_WQES;
1268
1269         qp->rcv_uar_context =
1270             dev_ib_data.uar_context_base + 8 * IPOIB_RCV_QP_DB_IDX;
1271         qp->send_uar_context =
1272             dev_ib_data.uar_context_base + 8 * IPOIB_SND_QP_DB_IDX;
1273
1274         memset(&qp->rcv_wq[0], 0, NUM_IPOIB_RCV_WQES * sizeof(qp->rcv_wq[0]));
1275         nds = sizeof(qp->rcv_wq[0].wqe) >> 4;
1276         /* iterrate through the list */
1277         for (j = 0, i = 0, next_i = 1;
1278              j < NUM_IPOIB_RCV_WQES;
1279              MOD_INC(i, NUM_IPOIB_RCV_WQES), MOD_INC(next_i,
1280                                                      NUM_IPOIB_RCV_WQES), ++j) {
1281
1282                 /* link the WQE to the next one */
1283                 bus_addr = virt_to_bus(&qp->rcv_wq[next_i].wqe);
1284                 ptr = qp->rcv_wq[i].wqe.control +
1285                     MT_BYTE_OFFSET(arbelprm_wqe_segment_ctrl_recv_st,
1286                                    wqe_segment_next);
1287                 INS_FLD(bus_addr >> 6, ptr, arbelprm_recv_wqe_segment_next_st,
1288                         nda_31_6);
1289                 INS_FLD(nds, ptr, arbelprm_recv_wqe_segment_next_st, nds);
1290
1291                 /* set the allocated buffers */
1292                 qp->rcv_bufs[i] = ib_buffers.ipoib_rcv_buf[i];
1293                 bus_addr = virt_to_bus(qp->rcv_bufs[i]);
1294                 qp->rcv_wq[i].wqe.mpointer[0].local_addr_l = bus_addr;
1295                 qp->rcv_wq[i].wqe.mpointer[0].byte_count = GRH_SIZE;
1296                 bus_addr = virt_to_bus(qp->rcv_bufs[i] + GRH_SIZE);
1297                 qp->rcv_wq[i].wqe.mpointer[1].local_addr_l = bus_addr;
1298                 qp->rcv_wq[i].wqe.mpointer[1].byte_count = IPOIB_RCV_BUF_SZ;
1299
1300                 for (k = 0; k < (((sizeof(qp->rcv_wq[i].wqe)) >> 4) - 1); ++k) {
1301                         qp->rcv_wq[i].wqe.mpointer[k].lkey = INVALID_WQE_LKEY;
1302                 }
1303         }
1304         cpu_to_be_buf(&qp->rcv_wq[0],
1305                       NUM_IPOIB_RCV_WQES * sizeof(qp->rcv_wq[0]));
1306
1307         for (i = 0; i < qp->max_recv_wqes; ++i) {
1308                 qp->rcv_wq[i].wqe_cont.qp = qp;
1309         }
1310
1311         /* set the pointer to the send WQEs buffer */
1312         qp->snd_wq = dev_buffers_p->ipoib_qp_snd_queue;
1313
1314         qp->snd_wqe_alloc_idx = 0;
1315         qp->max_snd_wqes = NUM_IPOIB_SND_WQES;
1316         qp->snd_wqe_cur_free = NUM_IPOIB_SND_WQES;
1317
1318         memset(&qp->snd_wq[0], 0, NUM_IPOIB_SND_WQES * sizeof(qp->snd_wq[i]));
1319         /* iterrate through the list */
1320         for (j = 0, i = 0, next_i = 1;
1321              j < NUM_IPOIB_RCV_WQES;
1322              MOD_INC(i, NUM_IPOIB_SND_WQES), MOD_INC(next_i,
1323                                                      NUM_IPOIB_SND_WQES), ++j) {
1324
1325                 /* link the WQE to the next one */
1326                 bus_addr = virt_to_bus(&qp->snd_wq[next_i].wqe_cont.wqe);
1327                 INS_FLD(bus_addr >> 6, &qp->snd_wq[i].wqe_cont.wqe.next.next,
1328                         arbelprm_wqe_segment_next_st, nda_31_6);
1329
1330                 /* set the allocated buffers */
1331                 qp->snd_bufs[i] = ib_buffers.send_ipoib_buf[i];
1332                 bus_addr = virt_to_bus(qp->snd_bufs[i]);
1333                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].local_addr_l = bus_addr;
1334                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].lkey = dev_ib_data.mkey;
1335
1336         }
1337         cpu_to_be_buf(&qp->snd_wq[0],
1338                       NUM_IPOIB_SND_WQES * sizeof(qp->snd_wq[i]));
1339
1340         for (i = 0; i < qp->max_snd_wqes; ++i) {
1341                 qp->snd_wq[i].wqe_cont.qp = qp;
1342         }
1343
1344         /* qp number and cq numbers are already set up */
1345         qp->snd_cq.cq_buf = dev_buffers_p->ipoib_snd_cq_buf;
1346         qp->rcv_cq.cq_buf = dev_buffers_p->ipoib_rcv_cq_buf;
1347         qp->snd_cq.num_cqes = NUM_IPOIB_SND_CQES;
1348         qp->rcv_cq.num_cqes = NUM_IPOIB_RCV_CQES;
1349         qp->snd_cq.arm_db_ctx_idx = IPOIB_SND_CQ_ARM_DB_IDX;
1350         qp->snd_cq.ci_db_ctx_idx = IPOIB_SND_CQ_CI_DB_IDX;
1351         qp->rcv_cq.arm_db_ctx_idx = IPOIB_RCV_CQ_ARM_DB_IDX;
1352         qp->rcv_cq.ci_db_ctx_idx = IPOIB_RCV_CQ_CI_DB_IDX;
1353         qp->rcv_db_record_index = IPOIB_RCV_QP_DB_IDX;
1354         qp->snd_db_record_index = IPOIB_SND_QP_DB_IDX;
1355         qp->qkey = qkey;
1356         rc = create_udqp(qp);
1357         if (!rc) {
1358                 *qp_pp = qp;
1359                 *snd_cq_pp = &qp->snd_cq;
1360                 *rcv_cq_pp = &qp->rcv_cq;
1361         }
1362
1363         return rc;
1364 }
1365
/*
 * Bring up a UD QP whose software state was prepared by the caller
 * (create_mads_qp()/create_ipoib_qp()):
 *   1. create send and receive CQs (SW2HW_CQ),
 *   2. take the QP through RST -> INIT -> RTR -> RTS,
 *   3. post all available receive buffers.
 *
 * Returns 0 on success, -1 on failure; on failure, CQs created so far
 * are handed back to software via the goto-cleanup labels.
 *
 * NOTE(review): error paths taken after cmd_rst2init_qpee() succeeds
 * jump to undo_rcv_cq without moving the QP back to reset — the QP is
 * presumably left allocated in hardware; confirm whether a 2err/2rst
 * step is needed there.
 */
static int create_udqp(struct udqp_st *qp)
{
	int rc, ret = 0;
	void *inprm;
	struct recv_wqe_st *rcv_wqe;

	inprm = dev_buffers_p->inprm_buf;

	/* resolve the doorbell-record pointers for both CQs from their
	   indices in the UAR context (8 bytes per record) */
	qp->rcv_cq.arm_db_ctx_pointer =
	    dev_ib_data.uar_context_base + 8 * qp->rcv_cq.arm_db_ctx_idx;
	qp->rcv_cq.ci_db_ctx_pointer =
	    dev_ib_data.uar_context_base + 8 * qp->rcv_cq.ci_db_ctx_idx;
	qp->snd_cq.arm_db_ctx_pointer =
	    dev_ib_data.uar_context_base + 8 * qp->snd_cq.arm_db_ctx_idx;
	qp->snd_cq.ci_db_ctx_pointer =
	    dev_ib_data.uar_context_base + 8 * qp->snd_cq.ci_db_ctx_idx;

	/* create send CQ */
	init_cq_buf(qp->snd_cq.cq_buf, qp->snd_cq.num_cqes);
	qp->snd_cq.cons_counter = 0;
	prep_sw2hw_cq_buf(inprm,
			  dev_ib_data.eq.eqn,
			  qp->snd_cq.cqn,
			  qp->snd_cq.cq_buf,
			  qp->snd_cq.ci_db_ctx_idx, qp->snd_cq.arm_db_ctx_idx);

	rc = cmd_sw2hw_cq(qp->snd_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto exit;
	}

	/* create receive CQ */
	init_cq_buf(qp->rcv_cq.cq_buf, qp->rcv_cq.num_cqes);
	qp->rcv_cq.cons_counter = 0;
	memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
	prep_sw2hw_cq_buf(inprm,
			  dev_ib_data.eq.eqn,
			  qp->rcv_cq.cqn,
			  qp->rcv_cq.cq_buf,
			  qp->rcv_cq.ci_db_ctx_idx, qp->rcv_cq.arm_db_ctx_idx);

	rc = cmd_sw2hw_cq(qp->rcv_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_snd_cq;
	}

	/* move the QP from reset to init; the log2 work-queue geometry is
	   derived from the sizes prepared by the caller (WQE strides are
	   in 16-byte units, hence the "- 4") */
	prep_rst2init_qpee_buf(inprm,
			       qp->snd_cq.cqn,
			       qp->rcv_cq.cqn,
			       qp->qkey,
			       my_log2(qp->max_recv_wqes),
			       my_log2(sizeof(qp->rcv_wq[0])) - 4,
			       my_log2(qp->max_snd_wqes),
			       my_log2(sizeof(qp->snd_wq[0])) - 4,
			       virt_to_bus(qp->snd_wq),
			       qp->snd_db_record_index,
			       virt_to_bus(qp->rcv_wq),
			       qp->rcv_db_record_index);

	rc = cmd_rst2init_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}

	qp->last_posted_rcv_wqe = NULL;
	qp->last_posted_snd_wqe = NULL;

	/* post all the buffers to the receive queue; the loop ends when
	   alloc_rcv_wqe() runs out of free WQEs */
	while (1) {
		/* allocate wqe */
		rcv_wqe = alloc_rcv_wqe(qp);
		if (!rcv_wqe)
			break;

		/* post the buffer */
		rc = post_rcv_buf(qp, rcv_wqe);
		if (rc) {
			ret = -1;
			eprintf("");
			goto undo_rcv_cq;
		}
	}

	/* init -> ready-to-receive */
	prep_init2rtr_qpee_buf(inprm);
	rc = cmd_init2rtr_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}

	/* ready-to-receive -> ready-to-send (no extra parameters needed) */
	memset(inprm, 0, QPCTX_IBUF_SZ);
	rc = cmd_rtr2rts_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
	if (rc) {
		ret = -1;
		eprintf("");
		goto undo_rcv_cq;
	}

	goto exit;

      undo_rcv_cq:
	rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
	if (rc)
		eprintf("");

      undo_snd_cq:
	rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
	if (rc)
		eprintf("");

      exit:
	return ret;
}
1486
1487 static int destroy_udqp(struct udqp_st *qp)
1488 {
1489         int rc;
1490
1491         rc = cmd_2err_qpee(qp->qpn);
1492         if (rc) {
1493                 eprintf("");
1494                 return rc;
1495         }
1496         tprintf("cmd_2err_qpee(0x%lx) success", qp->qpn);
1497
1498         rc = cmd_2rst_qpee(qp->qpn);
1499         if (rc) {
1500                 eprintf("");
1501                 return rc;
1502         }
1503         tprintf("cmd_2rst_qpee(0x%lx) success", qp->qpn);
1504
1505         rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
1506         if (rc) {
1507                 eprintf("");
1508                 return rc;
1509         }
1510         tprintf("cmd_hw2sw_cq(0x%lx) success", qp->snd_cq.cqn);
1511
1512         rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
1513         if (rc) {
1514                 eprintf("");
1515                 return rc;
1516         }
1517         tprintf("cmd_hw2sw_cq(0x%lx) success", qp->rcv_cq.cqn);
1518
1519         return rc;
1520 }
1521
1522 static void prep_send_wqe_buf(void *qph,
1523                               void *avh,
1524                               void *wqeh,
1525                               const void *buf,
1526                               unsigned int offset, __u16 len, __u8 e)
1527 {
1528         struct ud_send_wqe_st *snd_wqe = wqeh;
1529         struct ud_av_st *av = avh;
1530
1531         if (qph) {
1532         }
1533         /* suppress warnings */
1534         INS_FLD_TO_BE(e, &snd_wqe->next.control,
1535                       arbelprm_wqe_segment_ctrl_send_st, e);
1536         INS_FLD_TO_BE(1, &snd_wqe->next.control,
1537                       arbelprm_wqe_segment_ctrl_send_st, always1);
1538         INS_FLD_TO_BE(1, &snd_wqe->next.next, arbelprm_wqe_segment_next_st,
1539                       always1);
1540         memcpy(&snd_wqe->udseg, &av->av, sizeof av->av);
1541         INS_FLD_TO_BE(av->dest_qp, snd_wqe->udseg.av,
1542                       arbelprm_wqe_segment_ud_st, destination_qp);
1543         INS_FLD_TO_BE(av->qkey, snd_wqe->udseg.av, arbelprm_wqe_segment_ud_st,
1544                       q_key);
1545
1546         if (buf) {
1547                 memcpy(bus_to_virt
1548                        (be32_to_cpu(snd_wqe->mpointer[0].local_addr_l)) +
1549                        offset, buf, len);
1550                 len += offset;
1551         }
1552         snd_wqe->mpointer[0].byte_count = cpu_to_be32(len);
1553 }
1554
1555 static void *alloc_ud_av(void)
1556 {
1557         u8 next_free;
1558
1559         if (dev_ib_data.udav.udav_next_free == FL_EOL) {
1560                 return NULL;
1561         }
1562
1563         next_free = dev_ib_data.udav.udav_next_free;
1564         dev_ib_data.udav.udav_next_free =
1565             dev_buffers_p->av_array[next_free].ud_av.next_free;
1566         tprintf("allocated udav %d", next_free);
1567         return &dev_buffers_p->av_array[next_free].ud_av;
1568 }
1569
1570 static void free_ud_av(void *avh)
1571 {
1572         union ud_av_u *avu;
1573         __u8 idx, old_idx;
1574         struct ud_av_st *av = avh;
1575
1576         avu = (union ud_av_u *)av;
1577
1578         idx = avu - dev_buffers_p->av_array;
1579         tprintf("freeing udav idx=%d", idx);
1580         old_idx = dev_ib_data.udav.udav_next_free;
1581         dev_ib_data.udav.udav_next_free = idx;
1582         avu->ud_av.next_free = old_idx;
1583 }
1584
1585 static int update_cq_cons_idx(struct cq_st *cq)
1586 {
1587         /* write doorbell record */
1588         WRITE_DWORD_VOL(cq->ci_db_ctx_pointer, 0, htonl(cq->cons_counter));
1589
1590         /*
1591            INS_FLD_TO_BE(cq->cons_counter,
1592            cq->ci_db_ctx_pointer,
1593            arbelprm_cq_arm_db_record_st,
1594            counter);
1595
1596            INS_FLD_TO_BE(cq->cqn,
1597            cq->ci_db_ctx_pointer,
1598            arbelprm_cq_arm_db_record_st,
1599            cq_number);
1600
1601            INS_FLD_TO_BE(1,
1602            cq->ci_db_ctx_pointer,
1603            arbelprm_cq_arm_db_record_st,
1604            res); */
1605
1606         return 0;
1607 }
1608
/*
 * Poll a completion queue for at most one CQE.
 *
 * cqh      - CQ handle (struct cq_st *)
 * cqe_p    - out: the CQE, byte-swapped to CPU order, when one is found
 * num_cqes - out: 1 if a CQE was consumed, 0 otherwise
 *
 * Returns 0 on success (including "no CQE available"), or the error
 * from update_cq_cons_idx() on doorbell failure.
 */
static int poll_cq(void *cqh, union cqe_st *cqe_p, u8 * num_cqes)
{
	union cqe_st cqe;
	int rc;
	u32 *ptr;
	struct cq_st *cq = cqh;
	/* num_cqes is a power of two, so masking wraps the counter */
	__u32 cons_idx = cq->cons_counter & (cq->num_cqes - 1);

	ptr = (u32 *) (&(cq->cq_buf[cons_idx]));
	barrier();
	/* high bit of the 8th dword is the ownership bit: clear means
	   the CQE is owned by software and valid to consume */
	if ((ptr[7] & 0x80000000) == 0) {
		/* copy out and byte-swap before handing back ownership */
		cqe = cq->cq_buf[cons_idx];
		be_to_cpu_buf(&cqe, sizeof(cqe));
		*cqe_p = cqe;
		/* return the entry to hardware ownership */
		ptr[7] = 0x80000000;
		barrier();
		cq->cons_counter++;
		rc = update_cq_cons_idx(cq);
		if (rc) {
			return rc;
		}
		*num_cqes = 1;
	} else
		*num_cqes = 0;

	return 0;
}
1636
1637 static void dev2ib_cqe(struct ib_cqe_st *ib_cqe_p, union cqe_st *cqe_p)
1638 {
1639         __u8 opcode;
1640         __u32 wqe_addr_ba;
1641
1642         opcode =
1643             EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st, opcode);
1644         if (opcode >= CQE_ERROR_OPCODE)
1645                 ib_cqe_p->is_error = 1;
1646         else
1647                 ib_cqe_p->is_error = 0;
1648
1649         ib_cqe_p->is_send =
1650             EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st, s);
1651         wqe_addr_ba =
1652             EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st,
1653                    wqe_adr) << 6;
1654         ib_cqe_p->wqe = bus_to_virt(wqe_addr_ba);
1655
1656         ib_cqe_p->count =
1657             EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st,
1658                    byte_cnt);
1659 }
1660
1661 static int ib_poll_cq(void *cqh, struct ib_cqe_st *ib_cqe_p, u8 * num_cqes)
1662 {
1663         int rc;
1664         union cqe_st cqe;
1665         struct cq_st *cq = cqh;
1666         __u8 opcode;
1667
1668         rc = poll_cq(cq, &cqe, num_cqes);
1669         if (rc || ((*num_cqes) == 0)) {
1670                 return rc;
1671         }
1672
1673         dev2ib_cqe(ib_cqe_p, &cqe);
1674
1675         opcode =
1676             EX_FLD(cqe.good_cqe, arbelprm_completion_queue_entry_st, opcode);
1677         if (opcode >= CQE_ERROR_OPCODE) {
1678                 struct ud_send_wqe_st *wqe_p, wqe;
1679                 __u32 *ptr;
1680                 unsigned int i;
1681
1682                 wqe_p =
1683                     bus_to_virt(EX_FLD
1684                                 (cqe.error_cqe,
1685                                  arbelprm_completion_with_error_st,
1686                                  wqe_addr) << 6);
1687                 eprintf("syndrome=0x%lx",
1688                         EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
1689                                syndrome));
1690                 eprintf("vendor_syndrome=0x%lx",
1691                         EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
1692                                vendor_code));
1693                 eprintf("wqe_addr=0x%lx", wqe_p);
1694                 eprintf("myqpn=0x%lx",
1695                         EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
1696                                myqpn));
1697                 memcpy(&wqe, wqe_p, sizeof wqe);
1698                 be_to_cpu_buf(&wqe, sizeof wqe);
1699
1700                 eprintf("dumping wqe...");
1701                 ptr = (__u32 *) (&wqe);
1702                 for (i = 0; i < sizeof wqe; i += 4) {
1703                         printf("%lx : ", ptr[i >> 2]);
1704                 }
1705
1706         }
1707
1708         return rc;
1709 }
1710
/* always work on ipoib qp */
/*
 * Attach or detach the IPoIB QP to/from a multicast group.
 *
 * mcast_gid - the 128-bit multicast GID
 * add       - value written to the MGM entry's 'qi' bit; non-zero
 *             attaches the QP, zero detaches it
 *
 * Hashes the GID with the MGID_HASH command, builds an MGM entry in the
 * shared input-parameter buffer, and writes it with WRITE_MGM.
 *
 * Returns 0 on success, or the failing command's return code.
 *
 * NOTE(review): dev_buffers_p->inprm_buf is reused both for the hash
 * input (via 'tmp') and for the MGM entry (via 'mg'); the memcpy of the
 * raw GID below happens before the memset, so this aliasing is safe in
 * the current statement order — keep it that way.
 */
static int add_qp_to_mcast_group(union ib_gid_u mcast_gid, __u8 add)
{
	void *mg;
	__u8 *tmp;
	int rc;
	__u16 mgid_hash;
	void *mgmqp_p;

	/* MGID_HASH expects the GID byte-swapped in the mailbox */
	tmp = dev_buffers_p->inprm_buf;
	memcpy(tmp, mcast_gid.raw, 16);
	be_to_cpu_buf(tmp, 16);
	rc = cmd_mgid_hash(tmp, &mgid_hash);
	if (!rc) {
		/* build the MGM entry: the four GID dwords, then our
		   QPN in the first qp slot with the add/remove bit */
		mg = (void *)dev_buffers_p->inprm_buf;
		memset(mg, 0, MT_STRUCT_SIZE(arbelprm_mgm_entry_st));
		INS_FLD(mcast_gid.as_u32.dw[0], mg, arbelprm_mgm_entry_st,
			mgid_128_96);
		INS_FLD(mcast_gid.as_u32.dw[1], mg, arbelprm_mgm_entry_st,
			mgid_95_64);
		INS_FLD(mcast_gid.as_u32.dw[2], mg, arbelprm_mgm_entry_st,
			mgid_63_32);
		INS_FLD(mcast_gid.as_u32.dw[3], mg, arbelprm_mgm_entry_st,
			mgid_31_0);
		/* swap the 16 GID bytes within the entry to device order */
		be_to_cpu_buf(mg +
			      MT_BYTE_OFFSET(arbelprm_mgm_entry_st,
					     mgid_128_96), 16);
		mgmqp_p = mg + MT_BYTE_OFFSET(arbelprm_mgm_entry_st, mgmqp_0);
		INS_FLD(dev_ib_data.ipoib_qp.qpn, mgmqp_p, arbelprm_mgmqp_st,
			qpn_i);
		INS_FLD(add, mgmqp_p, arbelprm_mgmqp_st, qi);
		rc = cmd_write_mgm(mg, mgid_hash);
	}
	return rc;
}
1746
/*
 * Acknowledge the device interrupt by writing the clear value to the
 * clear-interrupt register (both cached at device-open time in
 * dev_ib_data).  Always returns 0.
 */
static int clear_interrupt(void)
{
	writel(dev_ib_data.clr_int_data, dev_ib_data.clr_int_addr);
	return 0;
}
1752
1753 static struct ud_send_wqe_st *alloc_send_wqe(udqp_t qph)
1754 {
1755         struct udqp_st *qp = qph;
1756         __u32 idx;
1757
1758         if (qp->snd_wqe_cur_free) {
1759                 qp->snd_wqe_cur_free--;
1760                 idx = qp->snd_wqe_alloc_idx;
1761                 qp->snd_wqe_alloc_idx =
1762                     (qp->snd_wqe_alloc_idx + 1) & (qp->max_snd_wqes - 1);
1763                 return &qp->snd_wq[idx].wqe_cont.wqe;
1764         }
1765
1766         return NULL;
1767 }
1768
1769 static struct recv_wqe_st *alloc_rcv_wqe(struct udqp_st *qp)
1770 {
1771         __u32 idx;
1772
1773         if (qp->recv_wqe_cur_free) {
1774                 qp->recv_wqe_cur_free--;
1775                 idx = qp->recv_wqe_alloc_idx;
1776                 qp->recv_wqe_alloc_idx =
1777                     (qp->recv_wqe_alloc_idx + 1) & (qp->max_recv_wqes - 1);
1778                 return &qp->rcv_wq[idx].wqe_cont.wqe;
1779         }
1780
1781         return NULL;
1782 }
1783
1784 static int free_send_wqe(struct ud_send_wqe_st *wqe)
1785 {
1786         struct udqp_st *qp = ((struct ude_send_wqe_cont_st *)wqe)->qp;
1787         qp->snd_wqe_cur_free++;
1788
1789         return 0;
1790 }
1791
1792 static int free_rcv_wqe(struct recv_wqe_st *wqe)
1793 {
1794         struct udqp_st *qp = ((struct recv_wqe_cont_st *)wqe)->qp;
1795         qp->recv_wqe_cur_free++;
1796
1797         return 0;
1798 }
1799
1800 static int free_wqe(void *wqe)
1801 {
1802         int rc = 0;
1803         struct recv_wqe_st *rcv_wqe;
1804
1805 //      tprintf("free wqe= 0x%x", wqe);
1806         if ((wqe >= (void *)(dev_ib_data.ipoib_qp.rcv_wq)) &&
1807             (wqe <
1808              (void *)(&dev_ib_data.ipoib_qp.rcv_wq[NUM_IPOIB_RCV_WQES]))) {
1809                 /* ipoib receive wqe */
1810                 free_rcv_wqe(wqe);
1811                 rcv_wqe = alloc_rcv_wqe(&dev_ib_data.ipoib_qp);
1812                 if (rcv_wqe) {
1813                         rc = post_rcv_buf(&dev_ib_data.ipoib_qp, rcv_wqe);
1814                         if (rc) {
1815                                 eprintf("");
1816                         }
1817                 }
1818         } else if (wqe >= (void *)(dev_ib_data.ipoib_qp.snd_wq) &&
1819                    wqe <
1820                    (void *)(&dev_ib_data.ipoib_qp.snd_wq[NUM_IPOIB_SND_WQES])) {
1821                 /* ipoib send wqe */
1822                 free_send_wqe(wqe);
1823         } else if (wqe >= (void *)(dev_ib_data.mads_qp.rcv_wq) &&
1824                    wqe <
1825                    (void *)(&dev_ib_data.mads_qp.rcv_wq[NUM_MADS_RCV_WQES])) {
1826                 /* mads receive wqe */
1827                 free_rcv_wqe(wqe);
1828                 rcv_wqe = alloc_rcv_wqe(&dev_ib_data.mads_qp);
1829                 if (rcv_wqe) {
1830                         rc = post_rcv_buf(&dev_ib_data.mads_qp, rcv_wqe);
1831                         if (rc) {
1832                                 eprintf("");
1833                         }
1834                 }
1835         } else if (wqe >= (void *)(dev_ib_data.mads_qp.snd_wq) &&
1836                    wqe <
1837                    (void *)(&dev_ib_data.mads_qp.snd_wq[NUM_MADS_SND_WQES])) {
1838                 /* mads send wqe */
1839                 free_send_wqe(wqe);
1840         } else {
1841                 rc = -1;
1842                 eprintf("");
1843         }
1844
1845         return rc;
1846 }
1847
/*
 * Publish the EQ consumer index to the hardware through the EQ's
 * consumer-index doorbell register.  Always returns 0.
 */
static int update_eq_cons_idx(struct eq_st *eq)
{
	writel(eq->cons_counter, eq->ci_base_base_addr);
	return 0;
}
1853
1854 static void dev2ib_eqe(struct ib_eqe_st *ib_eqe_p, struct eqe_t *eqe_p)
1855 {
1856         void *tmp;
1857
1858         ib_eqe_p->event_type =
1859             EX_FLD(eqe_p, arbelprm_event_queue_entry_st, event_type);
1860
1861         tmp = eqe_p + MT_BYTE_OFFSET(arbelprm_event_queue_entry_st, event_data);
1862         ib_eqe_p->cqn = EX_FLD(tmp, arbelprm_completion_event_st, cqn);
1863 }
1864
/*
 * Poll the (single) event queue for at most one EQE.
 *
 * ib_eqe_p - out: translated event when *num_eqes == 1
 * num_eqes - out: 1 if an EQE was consumed, 0 otherwise
 *
 * Returns 0 on success (including "no event"), -1 on doorbell failure.
 *
 * NOTE(review): the ownership bit is set via ptr[7] |= ... and then the
 * whole entry is overwritten with the byte-swapped copy 'eqe', which
 * clobbers that dword again.  The net entry contents after this differ
 * from the CQ path's handling — looks suspicious; confirm against the
 * Arbel PRM's EQE ownership rules before touching the ordering.
 */
static int poll_eq(struct ib_eqe_st *ib_eqe_p, __u8 * num_eqes)
{
	struct eqe_t eqe;
	u8 owner;
	int rc;
	u32 *ptr;
	struct eq_st *eq = &dev_ib_data.eq;
	/* eq_size is a power of two, so masking wraps the counter */
	__u32 cons_idx = eq->cons_counter & (eq->eq_size - 1);

	ptr = (u32 *) (&(eq->eq_buf[cons_idx]));
	/* high bit of the 8th dword is the ownership bit */
	owner = (ptr[7] & 0x80000000) ? OWNER_HW : OWNER_SW;
	if (owner == OWNER_SW) {
		/* copy out and byte-swap before translating */
		eqe = eq->eq_buf[cons_idx];
		be_to_cpu_buf(&eqe, sizeof(eqe));
		dev2ib_eqe(ib_eqe_p, &eqe);
		/* hand the entry back to hardware */
		ptr[7] |= 0x80000000;
		eq->eq_buf[cons_idx] = eqe;
		eq->cons_counter++;
		rc = update_eq_cons_idx(eq);
		if (rc) {
			return -1;
		}
		*num_eqes = 1;
	} else {
		*num_eqes = 0;
	}
	return 0;
}
1893
/*
 * Release the PCI BAR mappings (UAR and CR space) taken when the device
 * was opened.  Always returns 0.
 */
static int ib_device_close(void)
{
	iounmap(memfree_pci_dev.uar);
	iounmap(memfree_pci_dev.cr_space);
	return 0;
}
1900
1901 static __u32 dev_get_qpn(void *qph)
1902 {
1903         struct udqp_st *qp = qph;
1904
1905         return qp->qpn;
1906 }
1907
/*
 * Ring a doorbell: write the two dwords at 'dbell' (byte-swapped to
 * big-endian) to the UAR page at 'offset', first dword first, with a
 * barrier in between — the second write is what triggers the hardware,
 * so the order of the two MMIO writes must be preserved.
 */
static void dev_post_dbell(void *dbell, __u32 offset)
{
	__u32 *ptr;
	unsigned long address;

	ptr = dbell;

	/* sanity check: bits 31:24 of the first dword are expected to
	   be 1 here — presumably a fixed opcode/owner field; confirm
	   against the doorbell layout in the PRM */
	if (((ptr[0] >> 24) & 0xff) != 1) {
		eprintf("");
	}
	tprintf("ptr[0]= 0x%lx", ptr[0]);
	tprintf("ptr[1]= 0x%lx", ptr[1]);
	address = (unsigned long)(memfree_pci_dev.uar) + offset;
	tprintf("va=0x%lx pa=0x%lx", address,
		virt_to_bus((const void *)address));
	writel(htonl(ptr[0]), memfree_pci_dev.uar + offset);
	/* the first dword must be visible before the triggering write */
	barrier();
	address += 4;
	tprintf("va=0x%lx pa=0x%lx", address,
		virt_to_bus((const void *)address));
	writel(htonl(ptr[1]), address /*memfree_pci_dev.uar + offset + 4 */ );
}