fix cleanup flow for memfree devices
[etherboot.git] / src / drivers / net / mlx_ipoib / ib_mt25218.c
1 /*
2   This software is available to you under a choice of one of two
3   licenses.  You may choose to be licensed under the terms of the GNU
4   General Public License (GPL) Version 2, available at
5   <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD
6   license, available in the LICENSE.TXT file accompanying this
7   software.  These details are also available at
8   <http://openib.org/license.html>.
9
10   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
11   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
12   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
13   NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
14   BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
15   ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
16   CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
17   SOFTWARE.
18
19   Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
20 */
21
22 #include "mt25218.h"
23 #include "ib_driver.h"
24 #include "pci.h"
25
/* Advance a ring counter modulo max_count; max_count must be a power
   of two so the mask wraps the counter back to zero. */
#define MOD_INC(counter, max_count) (counter) = ((counter)+1) & ((max_count) - 1)

/* Debug aid: announce and then spin until the word at the (bogus)
   address 0x1234 is externally patched to 0x1234, giving a debugger a
   chance to attach. */
#define breakpoint {volatile __u32 *p=(__u32 *)0x1234;printf("breakpoint\n");do {} while((*p) != 0x1234);}

/* Volatile byte/word/dword store at byte offset `off' from `addr'.
   Used for memory shared with the HCA, so the compiler may neither
   cache nor elide the access. */
#define WRITE_BYTE_VOL(addr, off, val) \
    do { \
        (*((volatile __u8 *)(((volatile __u8 *)(addr)) + off))) = (val); \
    } while(0)

#define WRITE_WORD_VOL(addr, off, val) \
    do { \
        (*((volatile __u16 *)(((volatile __u8 *)(addr)) + off))) = (val); \
    } while(0)

#define WRITE_DWORD_VOL(addr, off, val) \
    do { \
        (*((volatile __u32 *)(((volatile __u8 *)(addr)) + off))) = (val); \
    } while(0)
44
/* All buffers shared with (DMA-accessed by) the HCA: work queues,
   completion queues, the event queue and address vectors.  The whole
   structure is covered by one memory region (see prep_sw2hw_mpt_buf()),
   so everything lives in a single contiguous area. */
struct device_buffers_st {
	/* inprm and outprm do not have an alignment constraint since
	   that is achieved programmatically */
	u8 inprm_buf[INPRM_BUF_SZ];	/* command input mailbox */
	u8 outprm_buf[OUTPRM_BUF_SZ];	/* command output mailbox */
	union recv_wqe_u mads_qp_rcv_queue[NUM_MADS_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	union recv_wqe_u ipoib_qp_rcv_queue[NUM_IPOIB_RCV_WQES]
	    __attribute__ ((aligned(RECV_WQE_U_ALIGN)));
	union ud_send_wqe_u mads_qp_snd_queue[NUM_MADS_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	union ud_send_wqe_u ipoib_qp_snd_queue[NUM_IPOIB_SND_WQES]
	    __attribute__ ((aligned(UD_SEND_WQE_U_ALIGN)));
	struct eqe_t eq_buf[1 << LOG2_EQ_SZ]
	    __attribute__ ((aligned(sizeof(struct eqe_t))));
	union cqe_st mads_snd_cq_buf[NUM_MADS_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_snd_cq_buf[NUM_IPOIB_SND_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st mads_rcv_cq_buf[NUM_MADS_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union cqe_st ipoib_rcv_cq_buf[NUM_IPOIB_RCV_CQES]
	    __attribute__ ((aligned(sizeof(union cqe_st))));
	union ud_av_u av_array[NUM_AVS];	/* UD address vectors */
} __attribute__ ((packed));
70
/* dev_buffers_p is aligned inside src_buf to a 4K boundary, so one
   extra alignment's worth of slack is reserved. */
#define STRUCT_ALIGN_SZ 4096
#define SRC_BUF_SZ (sizeof(struct device_buffers_st) + STRUCT_ALIGN_SZ - 1)

/* the following must be kept in this order
   for the memory region to cover the buffers */
static u8 src_buf[SRC_BUF_SZ];
static struct ib_buffers_st ib_buffers;
/* distance from dev_buffers_p to &memreg_size, i.e. the length of the
   memory region registered with the HCA (see init_dev_data()) */
static __u32 memreg_size;
/* end of order constraint */

/* physical memory area handed to the HCA for firmware and ICM pages */
struct phys_mem_desc_st {
	unsigned long base;	/* base physical address of the area */
	unsigned long offset;	/* next unused offset within the area */
};

static struct phys_mem_desc_st phys_mem;

static struct dev_pci_struct memfree_pci_dev;	/* PCI/BAR state of the HCA */
static struct device_buffers_st *dev_buffers_p;	/* 4K-aligned view of src_buf */
static struct device_ib_data_st dev_ib_data;	/* global IB device state */



/* ICM mapping parameters; non-static, presumably shared with the
   cleanup path -- TODO confirm */
struct map_icm_st icm_map_obj;
95
96 static int gw_write_cr(__u32 addr, __u32 data)
97 {
98         writel(htonl(data), memfree_pci_dev.cr_space + addr);
99         return 0;
100 }
101
102 static int gw_read_cr(__u32 addr, __u32 * result)
103 {
104         *result = ntohl(readl(memfree_pci_dev.cr_space + addr));
105         return 0;
106 }
107
108 static int reset_hca(void)
109 {
110         return gw_write_cr(MEMFREE_RESET_OFFSET, 1);
111 }
112
/*
 * ib_device_init() - first-stage, PCI-level initialization of the HCA.
 *
 * Saves the six BARs and the 256-byte config space (the chip reset in
 * setup_hca() wipes the config space; restore_config() writes it back),
 * then maps CR-space (BAR0) and this driver's UAR page (from BAR2).
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ib_device_init(struct pci_device *dev)
{
	int i;
	int rc;

	tprintf("");

	memset(&dev_ib_data, 0, sizeof dev_ib_data);

	/* save bars (BAR registers are 4 bytes apart from
	   PCI_BASE_ADDRESS_0) */
	tprintf("bus=%d devfn=0x%x", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		memfree_pci_dev.dev.bar[i] =
		    pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		tprintf("bar[%d]= 0x%08lx", i, memfree_pci_dev.dev.bar[i]);
	}

	tprintf("");
	/* save config space: 64 dwords = the full 256-byte header */
	for (i = 0; i < 64; ++i) {
		rc = pci_read_config_dword(dev, i << 2,
					   &memfree_pci_dev.dev.
					   dev_config_space[i]);
		if (rc) {
			eprintf("");
			return rc;
		}
		tprintf("config[%d]= 0x%08lx", i << 2,
			memfree_pci_dev.dev.dev_config_space[i]);
	}

	tprintf("");
	memfree_pci_dev.dev.dev = dev;

	/* map cr-space: 1MB at BAR0 */
	memfree_pci_dev.cr_space =
	    ioremap(memfree_pci_dev.dev.bar[0], 0x100000);
	if (!memfree_pci_dev.cr_space) {
		eprintf("");
		return -1;
	}

	/* map uar: the 4K page of index UAR_IDX within BAR2 */
	memfree_pci_dev.uar =
	    ioremap(memfree_pci_dev.dev.bar[2] + UAR_IDX * 0x1000, 0x1000);
	if (!memfree_pci_dev.uar) {
		eprintf("");
		return -1;
	}
	tprintf("uar_base (pa:va) = 0x%lx 0x%lx",
		memfree_pci_dev.dev.bar[2] + UAR_IDX * 0x1000,
		memfree_pci_dev.uar);

	tprintf("");

	return 0;
}
170
/* Round `buf' up to the next multiple of `align'.
   `align' must be a power of two. */
static inline unsigned long lalign(unsigned long buf, unsigned long align)
{
	unsigned long mask = align - 1;

	return (buf + mask) & ~mask;
}
176
/*
 * init_dev_data() - set up the aligned shared-buffer area and reserve
 * physical memory for the HCA's firmware/ICM pages.
 *
 * Always returns 0.
 */
static int init_dev_data(void)
{
	unsigned long tmp;
	unsigned long reserve_size = 32 * 1024 * 1024;

	/* align the shared buffers inside src_buf to a 4K boundary */
	tmp = lalign(virt_to_bus(src_buf), STRUCT_ALIGN_SZ);

	dev_buffers_p = bus_to_virt(tmp);
	/* the registered region must span from dev_buffers_p through
	   ib_buffers; memreg_size is declared immediately after
	   ib_buffers (see the order constraint above), so its own
	   address marks the end of that span */
	memreg_size = (__u32) (&memreg_size) - (__u32) dev_buffers_p;
	tprintf("src_buf=0x%lx, dev_buffers_p=0x%lx, memreg_size=0x%x", src_buf,
		dev_buffers_p, memreg_size);

	tprintf("inprm: va=0x%lx, pa=0x%lx", dev_buffers_p->inprm_buf,
		virt_to_bus(dev_buffers_p->inprm_buf));
	tprintf("outprm: va=0x%lx, pa=0x%lx", dev_buffers_p->outprm_buf,
		virt_to_bus(dev_buffers_p->outprm_buf));

	/* carve a 32MB-aligned 32MB area just below the image (_text);
	   NOTE(review): assumes this memory is otherwise unused -- the
	   HCA will DMA into it.  Confirm against the loader layout. */
	phys_mem.base =
	    (virt_to_phys(_text) - reserve_size) & (~(reserve_size - 1));

	phys_mem.offset = 0;

	return 0;
}
201
202 static int restore_config(void)
203 {
204         int i;
205         int rc;
206
207         for (i = 0; i < 64; ++i) {
208                 if (i != 22 && i != 23) {
209                         rc = pci_write_config_dword(memfree_pci_dev.dev.dev,
210                                                     i << 2,
211                                                     memfree_pci_dev.dev.
212                                                     dev_config_space[i]);
213                         if (rc) {
214                                 return rc;
215                         }
216                 }
217         }
218         return 0;
219 }
220
/*
 * prep_init_hca_buf() - serialize the INIT_HCA parameters into the
 * firmware mailbox layout (arbelprm_init_hca_st).
 *
 * The *_base_addr_l values whose mailbox fields hold fewer than 32
 * bits are stored right-shifted; the required shift is derived from
 * the field's bit size ("32 - MT_BIT_SIZE(...)" below).
 */
static void prep_init_hca_buf(struct init_hca_st *init_hca_p, void *buf)
{
	unsigned long ptr;
	__u8 shift;

	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_init_hca_st));

	/* QP/EE/SRQ/CQ/EQ/RDB context parameters */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st,
			   qpc_eec_cqc_eqc_rdb_parameters);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, qpc_base_addr_l);
	INS_FLD(init_hca_p->qpc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		qpc_base_addr_h);
	INS_FLD(init_hca_p->qpc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, qpc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_qp, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_qp);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, eec_base_addr_l);
	INS_FLD(init_hca_p->eec_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eec_base_addr_h);
	INS_FLD(init_hca_p->eec_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, eec_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_ee, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_ee);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, srqc_base_addr_l);
	INS_FLD(init_hca_p->srqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		srqc_base_addr_h);
	INS_FLD(init_hca_p->srqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, srqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_srq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_srq);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, cqc_base_addr_l);
	INS_FLD(init_hca_p->cqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		cqc_base_addr_h);
	INS_FLD(init_hca_p->cqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, cqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_cq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_of_cq);

	/* extended QP/EE contexts: full 32-bit addresses, no shift */
	INS_FLD(init_hca_p->eqpc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eqpc_base_addr_h);
	INS_FLD(init_hca_p->eqpc_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		eqpc_base_addr_l);

	INS_FLD(init_hca_p->eeec_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eeec_base_addr_h);
	INS_FLD(init_hca_p->eeec_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		eeec_base_addr_l);

	shift = 32 - MT_BIT_SIZE(arbelprm_qpcbaseaddr_st, eqc_base_addr_l);
	INS_FLD(init_hca_p->eqc_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		eqc_base_addr_h);
	INS_FLD(init_hca_p->eqc_base_addr_l >> shift, ptr,
		arbelprm_qpcbaseaddr_st, eqc_base_addr_l);
	INS_FLD(init_hca_p->log_num_of_eq, ptr, arbelprm_qpcbaseaddr_st,
		log_num_eq);

	INS_FLD(init_hca_p->rdb_base_addr_h, ptr, arbelprm_qpcbaseaddr_st,
		rdb_base_addr_h);
	INS_FLD(init_hca_p->rdb_base_addr_l, ptr, arbelprm_qpcbaseaddr_st,
		rdb_base_addr_l);

	/* multicast parameters */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, multicast_parameters);

	INS_FLD(init_hca_p->mc_base_addr_h, ptr, arbelprm_multicastparam_st,
		mc_base_addr_h);
	INS_FLD(init_hca_p->mc_base_addr_l, ptr, arbelprm_multicastparam_st,
		mc_base_addr_l);
	INS_FLD(init_hca_p->log_mc_table_entry_sz, ptr,
		arbelprm_multicastparam_st, log_mc_table_entry_sz);
	INS_FLD(init_hca_p->mc_table_hash_sz, ptr, arbelprm_multicastparam_st,
		mc_table_hash_sz);
	INS_FLD(init_hca_p->log_mc_table_sz, ptr, arbelprm_multicastparam_st,
		log_mc_table_sz);

	/* TPT (translation and protection table) parameters */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, tpt_parameters);

	INS_FLD(init_hca_p->mpt_base_addr_h, ptr, arbelprm_tptparams_st,
		mpt_base_adr_h);
	INS_FLD(init_hca_p->mpt_base_addr_l, ptr, arbelprm_tptparams_st,
		mpt_base_adr_l);
	INS_FLD(init_hca_p->log_mpt_sz, ptr, arbelprm_tptparams_st, log_mpt_sz);
	INS_FLD(init_hca_p->mtt_base_addr_h, ptr, arbelprm_tptparams_st,
		mtt_base_addr_h);
	INS_FLD(init_hca_p->mtt_base_addr_l, ptr, arbelprm_tptparams_st,
		mtt_base_addr_l);

	/* UAR parameters */
	ptr = (unsigned long)buf +
	    MT_BYTE_OFFSET(arbelprm_init_hca_st, uar_parameters);

	INS_FLD(init_hca_p->log_max_uars, ptr, arbelprm_uar_params_st,
		log_max_uars);

}
321
/*
 * prep_sw2hw_mpt_buf() - build the SW2HW_MPT mailbox: a single
 * physically-addressed memory region with key `mkey' covering
 * memreg_size bytes starting at dev_buffers_p.
 *
 * NOTE(review): unlike the other prep_* helpers this one does not
 * memset() the mailbox first -- presumably the caller supplies a
 * cleared inprm buffer; confirm.
 */
static void prep_sw2hw_mpt_buf(void *buf, __u32 mkey)
{
	INS_FLD(1, buf, arbelprm_mpt_st, lw);	/* lw/lr: local write/read access, presumably */
	INS_FLD(1, buf, arbelprm_mpt_st, lr);
	INS_FLD(1, buf, arbelprm_mpt_st, pa);	/* physical addressing mode */
	INS_FLD(1, buf, arbelprm_mpt_st, r_w);	/* region (vs. window), presumably */
	INS_FLD(mkey, buf, arbelprm_mpt_st, mem_key);
	INS_FLD(GLOBAL_PD, buf, arbelprm_mpt_st, pd);
	INS_FLD(virt_to_bus(dev_buffers_p), buf, arbelprm_mpt_st,
		start_address_l);
	INS_FLD(memreg_size, buf, arbelprm_mpt_st, reg_wnd_len_l);
}
334
/*
 * prep_sw2hw_eq_buf() - fill the EQ context mailbox for SW2HW_EQ,
 * pointing the hardware at `eq_buf' (accessed through the driver's
 * memory region key).
 */
static void prep_sw2hw_eq_buf(void *buf, struct eqe_t *eq_buf)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_eqc_st));

	INS_FLD(0xa, buf, arbelprm_eqc_st, st); /* fired */
	INS_FLD(virt_to_bus(eq_buf), buf, arbelprm_eqc_st, start_address_l);
	INS_FLD(LOG2_EQ_SZ, buf, arbelprm_eqc_st, log_eq_size);
	INS_FLD(GLOBAL_PD, buf, arbelprm_eqc_st, pd);
	INS_FLD(dev_ib_data.mkey, buf, arbelprm_eqc_st, lkey);
}
345
346 static void init_eq_buf(void *eq_buf)
347 {
348         struct eqe_t *eq = eq_buf;
349         int i, num_eqes = 1 << LOG2_EQ_SZ;
350
351         memset(eq, 0, num_eqes * sizeof eq[0]);
352         for (i = 0; i < num_eqes; ++i)
353                 WRITE_BYTE_VOL(&eq[i], EQE_OWNER_OFFSET, EQE_OWNER_VAL_HW);
354 }
355
/*
 * prep_init_ib_buf() - build the INIT_IB mailbox: the port
 * capabilities advertised when bringing the IB port up.
 */
static void prep_init_ib_buf(void *buf)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_init_ib_st));

	INS_FLD(MTU_2048, buf, arbelprm_init_ib_st, mtu_cap);
	INS_FLD(3, buf, arbelprm_init_ib_st, port_width_cap); /* 3 = 1x+4x, presumably -- confirm against PRM */
	INS_FLD(1, buf, arbelprm_init_ib_st, vl_cap);
	INS_FLD(1, buf, arbelprm_init_ib_st, max_gid);
	INS_FLD(64, buf, arbelprm_init_ib_st, max_pkey);
}
366
/*
 * prep_sw2hw_cq_buf() - fill the CQ context mailbox for SW2HW_CQ.
 *
 * eqn: event queue that receives this CQ's completion events
 * cqn: number of the CQ being handed to hardware
 * cq_buf: the CQE ring (addressed through the driver's memory region)
 * cq_ci_db_record / cq_state_db_record: UAR-context doorbell record
 *	indices for this CQ
 */
static void prep_sw2hw_cq_buf(void *buf, __u8 eqn,
			      __u32 cqn,
			      union cqe_st *cq_buf,
			      __u32 cq_ci_db_record, __u32 cq_state_db_record)
{
	memset(buf, 0, MT_STRUCT_SIZE(arbelprm_completion_queue_context_st));

	INS_FLD(0xA, buf, arbelprm_completion_queue_context_st, st); /* fired */
	INS_FLD(virt_to_bus(cq_buf), buf, arbelprm_completion_queue_context_st,
		start_address_l);
	INS_FLD(LOG2_CQ_SZ, buf, arbelprm_completion_queue_context_st,
		log_cq_size);
	INS_FLD(dev_ib_data.uar_idx, buf, arbelprm_completion_queue_context_st,
		usr_page);
	INS_FLD(eqn, buf, arbelprm_completion_queue_context_st, c_eqn);
	INS_FLD(GLOBAL_PD, buf, arbelprm_completion_queue_context_st, pd);
	INS_FLD(dev_ib_data.mkey, buf, arbelprm_completion_queue_context_st,
		l_key);
	INS_FLD(cqn, buf, arbelprm_completion_queue_context_st, cqn);
	INS_FLD(cq_ci_db_record, buf, arbelprm_completion_queue_context_st,
		cq_ci_db_record);
	INS_FLD(cq_state_db_record, buf, arbelprm_completion_queue_context_st,
		cq_state_db_record);
}
391
/*
 * prep_rst2init_qpee_buf() - build the RST2INIT_QPEE mailbox that
 * moves a UD QP from RESET to INIT: queue sizes/strides, WQE base
 * addresses, doorbell record indices, CQ numbers, qkey and port.
 *
 * The *_wqe_base_adr_l values are stored right-shifted because the
 * context fields hold fewer than 32 bits (see the `shift' math below).
 */
static void prep_rst2init_qpee_buf(void *buf,
				   __u32 snd_cqn,
				   __u32 rcv_cqn,
				   __u32 qkey,
				   __u32 log_rq_size,
				   __u32 log_rq_stride,
				   __u32 log_sq_size,
				   __u32 log_sq_stride,
				   __u32 snd_wqe_base_adr_l,
				   __u32 snd_db_record_index,
				   __u32 rcv_wqe_base_adr_l,
				   __u32 rcv_db_record_index)
{
	void *tmp;
	int shift;
	struct qp_ee_state_tarnisition_st *prm = buf;

	memset(buf, 0, sizeof *prm);

	tprintf("snd_cqn=0x%lx", snd_cqn);
	tprintf("rcv_cqn=0x%lx", rcv_cqn);
	tprintf("qkey=0x%lx", qkey);
	tprintf("log_rq_size=0x%lx", log_rq_size);
	tprintf("log_rq_stride=0x%lx", log_rq_stride);
	tprintf("log_sq_size=0x%lx", log_sq_size);
	tprintf("log_sq_stride=0x%lx", log_sq_stride);
	tprintf("snd_wqe_base_adr_l=0x%lx", snd_wqe_base_adr_l);
	tprintf("snd_db_record_index=0x%lx", snd_db_record_index);
	tprintf("rcv_wqe_base_adr_l=0x%lx", rcv_wqe_base_adr_l);
	tprintf("rcv_db_record_index=0x%lx", rcv_db_record_index);

	tmp = &prm->ctx;
	INS_FLD(TS_UD, tmp, arbelprm_queue_pair_ee_context_entry_st, st);
	INS_FLD(PM_STATE_MIGRATED, tmp, arbelprm_queue_pair_ee_context_entry_st,
		pm_state);
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, de);
	INS_FLD(MTU_2048, tmp, arbelprm_queue_pair_ee_context_entry_st, mtu);
	INS_FLD(11, tmp, arbelprm_queue_pair_ee_context_entry_st, msg_max); /* 2^11 = 2K max message */
	INS_FLD(log_rq_size, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_rq_size);
	INS_FLD(log_rq_stride, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_rq_stride);
	INS_FLD(log_sq_size, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_sq_size);
	INS_FLD(log_sq_stride, tmp, arbelprm_queue_pair_ee_context_entry_st,
		log_sq_stride);
	INS_FLD(dev_ib_data.uar_idx, tmp,
		arbelprm_queue_pair_ee_context_entry_st, usr_page);
	INS_FLD(GLOBAL_PD, tmp, arbelprm_queue_pair_ee_context_entry_st, pd);
	INS_FLD(dev_ib_data.mkey, tmp, arbelprm_queue_pair_ee_context_entry_st,
		wqe_lkey);
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, ssc); /* ssc/rsc: signal send/recv completions, presumably */
	INS_FLD(snd_cqn, tmp, arbelprm_queue_pair_ee_context_entry_st, cqn_snd);
	shift =
	    32 - MT_BIT_SIZE(arbelprm_queue_pair_ee_context_entry_st,
			     snd_wqe_base_adr_l);
	INS_FLD(snd_wqe_base_adr_l >> shift, tmp,
		arbelprm_queue_pair_ee_context_entry_st, snd_wqe_base_adr_l);
	INS_FLD(snd_db_record_index, tmp,
		arbelprm_queue_pair_ee_context_entry_st, snd_db_record_index);
	INS_FLD(1, tmp, arbelprm_queue_pair_ee_context_entry_st, rsc);
	INS_FLD(rcv_cqn, tmp, arbelprm_queue_pair_ee_context_entry_st, cqn_rcv);
	shift =
	    32 - MT_BIT_SIZE(arbelprm_queue_pair_ee_context_entry_st,
			     rcv_wqe_base_adr_l);
	INS_FLD(rcv_wqe_base_adr_l >> shift, tmp,
		arbelprm_queue_pair_ee_context_entry_st, rcv_wqe_base_adr_l);
	INS_FLD(rcv_db_record_index, tmp,
		arbelprm_queue_pair_ee_context_entry_st, rcv_db_record_index);
	INS_FLD(qkey, tmp, arbelprm_queue_pair_ee_context_entry_st, q_key);

	/* bind the QP to the physical port in the primary address path */
	tmp =
	    (__u8 *) (&prm->ctx) +
	    MT_BYTE_OFFSET(arbelprm_queue_pair_ee_context_entry_st,
			   primary_address_path);
	INS_FLD(dev_ib_data.port, tmp, arbelprm_address_path_st, port_number);

}
470
471 static void prep_init2rtr_qpee_buf(void *buf)
472 {
473         struct qp_ee_state_tarnisition_st *prm;
474
475         prm = (struct qp_ee_state_tarnisition_st *)buf;
476
477         memset(prm, 0, sizeof *prm);
478
479         INS_FLD(MTU_2048, &prm->ctx, arbelprm_queue_pair_ee_context_entry_st,
480                 mtu);
481         INS_FLD(11, &prm->ctx, arbelprm_queue_pair_ee_context_entry_st,
482                 msg_max);
483 }
484
/* Intentionally empty on memfree devices -- presumably kept so the
   common ib_driver code can call it unconditionally; confirm against
   the Tavor variant of this driver. */
static void init_av_array(void)
{
}
488
/*
 * my_log2() - ceiling of log2(arg): the smallest i with (1 << i) >= arg.
 *
 * Returns INT_MIN for arg == 0 (log2(0) = -infinity).
 *
 * Fix: the accumulator is now unsigned long (matching the argument
 * type) and the loop detects shift wrap-around, so an argument larger
 * than the biggest representable power of two can no longer cause an
 * endless loop (previously the __u32 accumulator wrapped to 0 and the
 * `tmp < arg' test stayed true forever).
 */
static int my_log2(unsigned long arg)
{
	int i;
	unsigned long tmp;

	if (arg == 0) {
		return INT_MIN; /* log2(0) = -infinity */
	}

	tmp = 1;
	i = 0;
	while (tmp < arg) {
		++i;
		tmp = tmp << 1;
		if (tmp == 0) {
			/* shifted past the top bit: arg exceeds the
			   largest power of two, ceil(log2) is i */
			break;
		}
	}

	return i;
}
510
/*
 * get_req_icm_pages() - number of 4K ICM pages needed for a context
 * table holding the firmware-reserved entries (2^log2_reserved) plus
 * `app_rsrc' application entries of `entry_size' bytes each.
 *
 * The entry count is rounded up to a power of two; its log2 is
 * returned through *log2_entries_p.
 */
static unsigned long get_req_icm_pages(unsigned long log2_reserved,
				       unsigned long app_rsrc,
				       unsigned long entry_size,
				       unsigned long *log2_entries_p)
{
	unsigned long log2_total;
	unsigned long bytes;

	log2_total = my_log2((1 << log2_reserved) + app_rsrc);
	*log2_entries_p = log2_total;

	/* table size in bytes, rounded up to whole 4K pages */
	bytes = entry_size << log2_total;
	return (bytes + 4095) >> 12;
}
528
/*
 * init_uar_context() - populate the driver's UAR context page with
 * doorbell records: a CQ-arm and a CQ-set-CI record for each of the
 * four CQs, and a send and a receive doorbell record for each of the
 * two QPs.  Each record is 8 bytes, located by its *_IDX slot; fields
 * are written big-endian (INS_FLD_TO_BE) as the hardware reads them.
 */
static void init_uar_context(void *uar_context_va)
{
	void *ptr;
	/* clear all uar context */
	memset(uar_context_va, 0, 4096);

	/* CQ arm doorbell records */
	ptr = uar_context_va + MADS_RCV_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);

	ptr = uar_context_va + MADS_SND_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);

	ptr = uar_context_va + IPOIB_RCV_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);

	ptr = uar_context_va + IPOIB_SND_CQ_ARM_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_ARM, ptr, arbelprm_cq_arm_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_arm_db_record_st, cq_number);

	/* send queue doorbell records */
	ptr = uar_context_va + MADS_SND_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_SQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);

	ptr = uar_context_va + IPOIB_SND_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_SQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);

	/* group separator record */
	ptr = uar_context_va + GROUP_SEP_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_GROUP_SEP, ptr, arbelprm_cq_arm_db_record_st,
		      res);

	/* receive queue doorbell records */
	ptr = uar_context_va + MADS_RCV_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_RQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);

	ptr = uar_context_va + IPOIB_RCV_QP_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_RQ_DBELL, ptr, arbelprm_qp_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.qpn, ptr, arbelprm_qp_db_record_st,
		      qp_number);

	/* CQ consumer-index doorbell records */
	ptr = uar_context_va + MADS_RCV_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);

	ptr = uar_context_va + MADS_SND_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.mads_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);

	ptr = uar_context_va + IPOIB_RCV_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.rcv_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);

	ptr = uar_context_va + IPOIB_SND_CQ_CI_DB_IDX * 8;
	INS_FLD_TO_BE(UAR_RES_CQ_SET_CI, ptr, arbelprm_cq_ci_db_record_st, res);
	INS_FLD_TO_BE(dev_ib_data.ipoib_qp.snd_cq.cqn, ptr,
		      arbelprm_cq_ci_db_record_st, cq_number);

}
600
601 static int setup_hca(__u8 port, void **eq_p)
602 {
603         int ret;
604         int rc;
605         struct query_fw_st qfw;
606         struct map_icm_st map_obj;
607         struct dev_lim_st dev_lim;
608         struct init_hca_st init_hca;
609         __u8 log2_pages;
610         unsigned long icm_start, icm_size, tmp;
611         unsigned long log2_entries;
612         __u32 aux_pages;
613         __u32 mem_key, key, tmp_key;
614         __u8 eqn;
615         __u32 event_mask;
616         struct eqe_t *eq_buf;
617         void *inprm;
618         unsigned long bus_addr;
619         struct query_adapter_st qa;
620         __u8 log_max_uars = 1;
621         void *uar_context_va;
622         __u32 uar_context_pa;
623
624         tprintf("called");
625         init_dev_data();
626         inprm = get_inprm_buf();
627
628         rc = reset_hca();
629         if (rc) {
630                 eprintf("");
631                 return rc;
632         } else {
633                 tprintf("reset_hca() success");
634         }
635
636         mdelay(1000);           /* wait for 1 sec */
637
638         rc = restore_config();
639         if (rc) {
640                 eprintf("");
641                 return rc;
642         } else {
643                 tprintf("restore_config() success");
644         }
645
646         dev_ib_data.pd = GLOBAL_PD;
647         dev_ib_data.port = port;
648         dev_ib_data.qkey = GLOBAL_QKEY;
649
650         rc = cmd_query_fw(&qfw);
651         if (rc) {
652                 eprintf("");
653                 return rc;
654         }
655         else {
656                 tprintf("cmd_query_fw() success");
657
658                 if (print_info) {
659                         printf("FW ver = %d.%d.%d\n",
660                         qfw.fw_rev_major,
661                         qfw.fw_rev_minor,
662                         qfw.fw_rev_subminor);
663                 }
664
665                 tprintf("fw_rev_major=%d", qfw.fw_rev_major);
666                 tprintf("fw_rev_minor=%d", qfw.fw_rev_minor);
667                 tprintf("fw_rev_subminor=%d", qfw.fw_rev_subminor);
668                 tprintf("error_buf_start_h=0x%x", qfw.error_buf_start_h);
669                 tprintf("error_buf_start_l=0x%x", qfw.error_buf_start_l);
670                 tprintf("error_buf_size=%d", qfw.error_buf_size);
671         }
672
673
674
675         bus_addr =
676             ((unsigned long)((u64) qfw.error_buf_start_h << 32) | qfw.
677              error_buf_start_l);
678     dev_ib_data.error_buf_addr= ioremap(bus_addr,
679                                                                                 qfw.error_buf_size*4);
680         dev_ib_data.error_buf_size= qfw.error_buf_size;
681         if (!dev_ib_data.error_buf_addr) {
682                 eprintf("");
683                 return -1;
684         }
685
686
687         bus_addr =
688             ((unsigned long)((u64) qfw.clear_int_addr.addr_h << 32) | qfw.
689              clear_int_addr.addr_l);
690         dev_ib_data.clr_int_addr = bus_to_virt(bus_addr);
691
692         rc = cmd_enable_lam();
693         if (rc == 0x22 /* LAM_NOT_PRE -- need to put a name here */ ) {
694                 // ??????
695         } else if (rc == 0) {
696                 // ??????
697         } else {
698                 eprintf("");
699                 return rc;
700         }
701
702         log2_pages = my_log2(qfw.fw_pages);
703
704         memset(&map_obj, 0, sizeof map_obj);
705         map_obj.num_vpm = 1;
706         map_obj.vpm_arr[0].log2_size = log2_pages;
707         map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
708         rc = cmd_map_fa(&map_obj);
709         if (rc) {
710                 eprintf("");
711                 return rc;
712         }
713         phys_mem.offset += 1 << (log2_pages + 12);
714
715         rc = cmd_run_fw();
716         if (rc) {
717                 ret = -1;
718                 eprintf("");
719                 goto undo_map_fa;
720         }
721
722         rc = cmd_mod_stat_cfg();
723         if (rc) {
724                 ret = -1;
725                 eprintf("");
726                 goto undo_map_fa;
727         }
728
729         rc = cmd_query_dev_lim(&dev_lim);
730         if (rc) {
731                 ret = -1;
732                 eprintf("");
733                 goto undo_map_fa;
734         }
735
736         dev_ib_data.uar_idx = dev_lim.num_rsvd_uars;
737
738         tprintf("max_icm_size_h=0x%lx", dev_lim.max_icm_size_h);
739         tprintf("max_icm_size_l=0x%lx", dev_lim.max_icm_size_l);
740
741         memset(&init_hca, 0, sizeof init_hca);
742         icm_start = 0;
743         icm_size = 0;
744
745         icm_start += ((dev_lim.num_rsvd_uars + 1) << 12);
746         icm_size += ((dev_lim.num_rsvd_uars + 1) << 12);
747
748         tmp = get_req_icm_pages(dev_lim.log2_rsvd_qps,
749                                 MAX_APP_QPS,
750                                 dev_lim.qpc_entry_sz, &log2_entries);
751         init_hca.qpc_base_addr_l = icm_start;
752         init_hca.log_num_of_qp = log2_entries;
753         icm_start += (tmp << 12);
754         icm_size += (tmp << 12);
755
756         init_hca.eqpc_base_addr_l = icm_start;
757         icm_start += (tmp << 12);
758         icm_size += (tmp << 12);
759
760         tmp = get_req_icm_pages(dev_lim.log2_rsvd_srqs,
761                                 0, dev_lim.srq_entry_sz, &log2_entries);
762         init_hca.srqc_base_addr_l = icm_start;
763         init_hca.log_num_of_srq = log2_entries;
764         icm_start += (tmp << 12);
765         icm_size += (tmp << 12);
766
767         tmp = get_req_icm_pages(dev_lim.log2_rsvd_ees,
768                                 0, dev_lim.eec_entry_sz, &log2_entries);
769         init_hca.eec_base_addr_l = icm_start;
770         init_hca.log_num_of_ee = log2_entries;
771         icm_start += (tmp << 12);
772         icm_size += (tmp << 12);
773
774         init_hca.eeec_base_addr_l = icm_start;
775         icm_start += (tmp << 12);
776         icm_size += (tmp << 12);
777
778         tmp = get_req_icm_pages(dev_lim.log2_rsvd_cqs,
779                                 MAX_APP_CQS,
780                                 dev_lim.cqc_entry_sz, &log2_entries);
781         init_hca.cqc_base_addr_l = icm_start;
782         init_hca.log_num_of_cq = log2_entries;
783         icm_start += (tmp << 12);
784         icm_size += (tmp << 12);
785
786         tmp = get_req_icm_pages(dev_lim.log2_rsvd_mtts,
787                                 0, dev_lim.mtt_entry_sz, &log2_entries);
788         init_hca.mtt_base_addr_l = icm_start;
789         icm_start += (tmp << 12);
790         icm_size += (tmp << 12);
791
792         tmp = get_req_icm_pages(dev_lim.log2_rsvd_mrws,
793                                 1, dev_lim.mpt_entry_sz, &log2_entries);
794         init_hca.mpt_base_addr_l = icm_start;
795         init_hca.log_mpt_sz = log2_entries;
796         icm_start += (tmp << 12);
797         icm_size += (tmp << 12);
798
799         tmp = get_req_icm_pages(dev_lim.log2_rsvd_rdbs, 1, 32,  /* size of rdb entry */
800                                 &log2_entries);
801         init_hca.rdb_base_addr_l = icm_start;
802         icm_start += (tmp << 12);
803         icm_size += (tmp << 12);
804
805         init_hca.eqc_base_addr_l = icm_start;
806         init_hca.log_num_of_eq = LOG2_EQS;
807         tmp = dev_lim.eqc_entry_sz * (1 << LOG2_EQS);
808         icm_start += tmp;
809         icm_size += tmp;
810
811         init_hca.mc_base_addr_l = icm_start;
812         init_hca.log_mc_table_entry_sz =
813             my_log2(MT_STRUCT_SIZE(arbelprm_mgm_entry_st));
814         init_hca.mc_table_hash_sz = 8;
815         init_hca.log_mc_table_sz = 3;
816         icm_size +=
817             (MT_STRUCT_SIZE(arbelprm_mgm_entry_st) * init_hca.mc_table_hash_sz);
818         icm_start +=
819             (MT_STRUCT_SIZE(arbelprm_mgm_entry_st) * init_hca.mc_table_hash_sz);
820
821         rc = cmd_set_icm_size(icm_size, &aux_pages);
822         if (rc) {
823                 ret = -1;
824                 eprintf("");
825                 goto undo_map_fa;
826         }
827
828         memset(&map_obj, 0, sizeof map_obj);
829         map_obj.num_vpm = 1;
830         map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
831         map_obj.vpm_arr[0].log2_size = my_log2(aux_pages);
832         rc = cmd_map_icm_aux(&map_obj);
833         if (rc) {
834                 ret = -1;
835                 eprintf("");
836                 goto undo_map_fa;
837         }
838         phys_mem.offset += (1 << (map_obj.vpm_arr[0].log2_size + 12));
839
840         uar_context_pa = phys_mem.base + phys_mem.offset +
841             dev_ib_data.uar_idx * 4096;
842         uar_context_va = phys_to_virt(uar_context_pa);
843         tprintf("uar_context: va=0x%lx, pa=0x%lx", uar_context_va,
844                 uar_context_pa);
845         dev_ib_data.uar_context_base = uar_context_va;
846
847         memset(&map_obj, 0, sizeof map_obj);
848         map_obj.num_vpm = 1;
849         map_obj.vpm_arr[0].pa_l = phys_mem.base + phys_mem.offset;
850         map_obj.vpm_arr[0].log2_size = my_log2((icm_size + 4095) >> 12);
851         rc = cmd_map_icm(&map_obj);
852         if (rc) {
853                 ret = -1;
854                 eprintf("");
855                 goto undo_map_fa;
856         }
857         icm_map_obj = map_obj;
858
859         phys_mem.offset += (1 << (map_obj.vpm_arr[0].log2_size + 12));
860
861         init_hca.log_max_uars = log_max_uars;
862         tprintf("inprm: va=0x%lx, pa=0x%lx", inprm, virt_to_bus(inprm));
863         prep_init_hca_buf(&init_hca, inprm);
864         rc = cmd_init_hca(inprm, MT_STRUCT_SIZE(arbelprm_init_hca_st));
865         if (rc) {
866                 ret = -1;
867                 eprintf("");
868                 goto undo_map_fa;
869         }
870
871         rc = cmd_query_adapter(&qa);
872         if (rc) {
873                 eprintf("");
874                 return rc;
875         }
876         dev_ib_data.clr_int_data = 1 << qa.intapin;
877
878         tmp_key = 1 << dev_lim.log2_rsvd_mrws | MKEY_PREFIX;
879         mem_key = 1 << (dev_lim.log2_rsvd_mrws + 8) | (MKEY_PREFIX >> 24);
880         prep_sw2hw_mpt_buf(inprm, tmp_key);
881         rc = cmd_sw2hw_mpt(&key, 1 << dev_lim.log2_rsvd_mrws, inprm,
882                            SW2HW_MPT_IBUF_SZ);
883         if (rc) {
884                 ret = -1;
885                 eprintf("");
886                 goto undo_map_fa;
887         } else {
888                 tprintf("cmd_sw2hw_mpt() success, key=0x%lx", mem_key);
889         }
890         dev_ib_data.mkey = mem_key;
891
892         eqn = EQN;
893         /* allocate a single EQ which will receive 
894            all the events */
895         eq_buf = dev_buffers_p->eq_buf;
896         init_eq_buf(eq_buf);    /* put in HW ownership */
897         prep_sw2hw_eq_buf(inprm, eq_buf);
898         rc = cmd_sw2hw_eq(SW2HW_EQ_IBUF_SZ);
899         if (rc) {
900                 ret = -1;
901                 eprintf("");
902                 goto undo_sw2hw_mpt;
903         } else
904                 tprintf("cmd_sw2hw_eq() success");
905
906         event_mask = (1 << XDEV_EV_TYPE_CQ_COMP) |
907             (1 << XDEV_EV_TYPE_CQ_ERR) |
908             (1 << XDEV_EV_TYPE_LOCAL_WQ_CATAS_ERR) |
909             (1 << XDEV_EV_TYPE_PORT_ERR) |
910             (1 << XDEV_EV_TYPE_LOCAL_WQ_INVALID_REQ_ERR) |
911             (1 << XDEV_EV_TYPE_LOCAL_WQ_ACCESS_VIOL_ERR) |
912             (1 << TAVOR_IF_EV_TYPE_OVERRUN);
913         rc = cmd_map_eq(eqn, event_mask, 1);
914         if (rc) {
915                 ret = -1;
916                 eprintf("");
917                 goto undo_sw2hw_eq;
918         } else
919                 tprintf("cmd_map_eq() success");
920
921         dev_ib_data.eq.eqn = eqn;
922         dev_ib_data.eq.eq_buf = eq_buf;
923         dev_ib_data.eq.cons_counter = 0;
924         dev_ib_data.eq.eq_size = 1 << LOG2_EQ_SZ;
925         bus_addr =
926             ((unsigned long)((u64) qfw.eq_ci_table.addr_h << 32) | qfw.
927              eq_ci_table.addr_l)
928             + eqn * 8;
929         dev_ib_data.eq.ci_base_base_addr = bus_to_virt(bus_addr);
930         *eq_p = &dev_ib_data.eq;
931
932         prep_init_ib_buf(inprm);
933         rc = cmd_init_ib(port, inprm, INIT_IB_IBUF_SZ);
934         if (rc) {
935                 ret = -1;
936                 eprintf("");
937                 goto undo_sw2hw_eq;
938         } else
939                 tprintf("cmd_init_ib() success");
940
941         init_av_array();
942         tprintf("init_av_array() done");
943
944         /* set the qp and cq numbers according
945            to the results of query_dev_lim */
946         dev_ib_data.mads_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
947             +QPN_BASE + MADS_QPN_SN;
948         dev_ib_data.ipoib_qp.qpn = (1 << dev_lim.log2_rsvd_qps) +
949             +QPN_BASE + IPOIB_QPN_SN;
950
951         dev_ib_data.mads_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
952             MADS_SND_CQN_SN;
953         dev_ib_data.mads_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
954             MADS_RCV_CQN_SN;
955
956         dev_ib_data.ipoib_qp.snd_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
957             IPOIB_SND_CQN_SN;
958         dev_ib_data.ipoib_qp.rcv_cq.cqn = (1 << dev_lim.log2_rsvd_cqs) +
959             IPOIB_RCV_CQN_SN;
960
961         init_uar_context(uar_context_va);
962
963         ret = 0;
964         goto exit;
965
966       undo_sw2hw_eq:
967         rc = cmd_hw2sw_eq(eqn);
968         if (rc)
969                 eprintf("");
970         else
971                 tprintf("cmd_hw2sw_eq() success");
972
973       undo_sw2hw_mpt:
974         rc = cmd_hw2sw_mpt(tmp_key);
975         if (rc)
976                 eprintf("");
977
978       undo_map_fa:
979         rc = cmd_unmap_fa();
980         if (rc)
981                 eprintf("");
982
983       exit:
984         return ret;
985 }
986
987
988 static int unset_hca(void)
989 {
990         int rc, ret = 0;
991
992         rc = cmd_unmap_icm(&icm_map_obj);
993         if (rc)
994                 eprintf("");
995         ret |= rc;
996
997
998         rc = cmd_unmap_icm_aux();
999         if (rc)
1000                 eprintf("");
1001         ret |= rc;
1002
1003         rc = cmd_unmap_fa();
1004         if (rc)
1005                 eprintf("");
1006         ret |= rc;
1007
1008         return ret;
1009 }
1010
1011 static void *get_inprm_buf(void)
1012 {
1013         return dev_buffers_p->inprm_buf;
1014 }
1015
1016 static void *get_outprm_buf(void)
1017 {
1018         return dev_buffers_p->outprm_buf;
1019 }
1020
1021 static void *get_send_wqe_buf(void *wqe, __u8 index)
1022 {
1023         struct ud_send_wqe_st *snd_wqe = wqe;
1024
1025         return bus_to_virt(be32_to_cpu(snd_wqe->mpointer[index].local_addr_l));
1026 }
1027
1028 static void *get_rcv_wqe_buf(void *wqe, __u8 index)
1029 {
1030         struct recv_wqe_st *rcv_wqe = wqe;
1031
1032         return bus_to_virt(be32_to_cpu(rcv_wqe->mpointer[index].local_addr_l));
1033 }
1034
/*
 * (Re)initialize a UD address vector.
 *
 * av   - the address-vector slot to fill
 * dlid - remote (destination) LID
 * g    - GRH flag; when non-zero a GRH is generated and the remote
 *        GID is taken from 'gid' (if non-NULL) or zeroed
 * sl   - service level
 * rate - static rate selector: >= 3 selects 4x, otherwise 1x
 * gid  - remote GID; only consulted when 'g' is set, may be NULL
 * qpn  - remote QP number
 *
 * The hardware portion (av->av) is written big-endian via
 * INS_FLD_TO_BE; dest_qp and qkey are kept in the software part of
 * the structure for later insertion into the send WQE.
 */
static void modify_av_params(struct ud_av_st *av,
			     __u16 dlid,
			     __u8 g,
			     __u8 sl, __u8 rate, union ib_gid_u *gid, __u32 qpn)
{
	/* start from a clean slate so unset fields read as zero */
	memset(&av->av, 0, sizeof av->av);

	INS_FLD_TO_BE(dev_ib_data.port, &av->av, arbelprm_ud_address_vector_st,
		      port_number);
	INS_FLD_TO_BE(dev_ib_data.pd, &av->av, arbelprm_ud_address_vector_st,
		      pd);
	INS_FLD_TO_BE(dlid, &av->av, arbelprm_ud_address_vector_st, rlid);
	INS_FLD_TO_BE(g, &av->av, arbelprm_ud_address_vector_st, g);
	INS_FLD_TO_BE(sl, &av->av, arbelprm_ud_address_vector_st, sl);
	/* NOTE(review): msg is hard-coded to 3 (max message size field) --
	   confirm the encoding against the Arbel PRM */
	INS_FLD_TO_BE(3, &av->av, arbelprm_ud_address_vector_st, msg);

	if (rate >= 3)
		INS_FLD_TO_BE(0, &av->av, arbelprm_ud_address_vector_st, max_stat_rate);	/* 4x */
	else
		INS_FLD_TO_BE(1, &av->av, arbelprm_ud_address_vector_st, max_stat_rate);	/* 1x */

	if (g) {
		if (gid) {
			/* copy the 128-bit remote GID into the AV one
			   32-bit word at a time */
			INS_FLD(*((__u32 *) (&gid->raw[0])), &av->av,
				arbelprm_ud_address_vector_st, rgid_127_96);
			INS_FLD(*((__u32 *) (&gid->raw[4])), &av->av,
				arbelprm_ud_address_vector_st, rgid_95_64);
			INS_FLD(*((__u32 *) (&gid->raw[8])), &av->av,
				arbelprm_ud_address_vector_st, rgid_63_32);
			INS_FLD(*((__u32 *) (&gid->raw[12])), &av->av,
				arbelprm_ud_address_vector_st, rgid_31_0);
		} else {
			/* GRH requested but no GID supplied: zero it */
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_127_96);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_95_64);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_63_32);
			INS_FLD(0, &av->av, arbelprm_ud_address_vector_st,
				rgid_31_0);
		}
	} else {
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_127_96);
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_95_64);
		INS_FLD(0, &av->av, arbelprm_ud_address_vector_st, rgid_63_32);
		/* NOTE(review): value 2 (not 0) when no GRH -- looks
		   deliberate; verify against the PRM before changing */
		INS_FLD(2, &av->av, arbelprm_ud_address_vector_st, rgid_31_0);
	}
	/* software-side fields, inserted into the WQE at post time */
	av->dest_qp = qpn;
	av->qkey = dev_ib_data.qkey;
}
1085
1086 static void init_cq_buf(union cqe_st *cq_buf, __u8 num_cqes)
1087 {
1088         int i;
1089
1090         memset(cq_buf, 0, sizeof(union cqe_st) * num_cqes);
1091         for (i = 0; i < num_cqes; ++i) {
1092                 WRITE_BYTE_VOL(&cq_buf[i], CQE_OWNER_OFFSET, CQE_OWNER_VAL_HW);
1093         }
1094 }
1095
1096 static int post_rcv_buf(struct udqp_st *qp, struct recv_wqe_st *rcv_wqe)
1097 {
1098         int i;
1099
1100         /* put a valid lkey */
1101         for (i = 0; i < MAX_SCATTER; ++i) {
1102                 rcv_wqe->mpointer[i].lkey = cpu_to_be32(dev_ib_data.mkey);
1103         }
1104
1105         qp->post_rcv_counter++;
1106         WRITE_WORD_VOL(qp->rcv_uar_context, 2, htons(qp->post_rcv_counter));
1107
1108         return 0;
1109 }
1110
/*
 * Post a prepared UD send WQE by ringing the send doorbell.
 *
 * qph        - the UD QP the WQE belongs to
 * wqeh       - the send WQE (already filled by prep_send_wqe_buf)
 * num_gather - number of gather pointers actually used by the WQE
 *
 * Updates the doorbell record in the UAR context, builds the send
 * doorbell, links the previously posted WQE to this one so hardware
 * can chain them, and finally writes the doorbell.  Returns the
 * result of cmd_post_doorbell (0 on success).
 */
static int post_send_req(void *qph, void *wqeh, __u8 num_gather)
{
	int rc;
	struct udqp_st *qp = qph;
	struct ud_send_wqe_st *snd_wqe = wqeh;
	struct send_doorbell_st dbell;
	__u32 nds;

	qp->post_send_counter++;

	/* publish the new send counter through the UAR doorbell record */
	WRITE_WORD_VOL(qp->send_uar_context, 2, htons(qp->post_send_counter));

	memset(&dbell, 0, sizeof dbell);
	INS_FLD(XDEV_NOPCODE_SEND, &dbell, arbelprm_send_doorbell_st, nopcode);
	INS_FLD(1, &dbell, arbelprm_send_doorbell_st, f);
	/* wqe_counter is the value *before* the increment above */
	INS_FLD(qp->post_send_counter - 1, &dbell, arbelprm_send_doorbell_st,
		wqe_counter);
	INS_FLD(1, &dbell, arbelprm_send_doorbell_st, wqe_cnt);
	/* WQE size: next + UD segments plus the used gather pointers,
	   expressed in 16-byte units */
	nds = (sizeof(snd_wqe->next) +
	       sizeof(snd_wqe->udseg) +
	       sizeof(snd_wqe->mpointer[0]) * num_gather) >> 4;
	INS_FLD(nds, &dbell, arbelprm_send_doorbell_st, nds);
	INS_FLD(qp->qpn, &dbell, arbelprm_send_doorbell_st, qpn);

	/* chain the previously posted WQE to this one so the device can
	   fetch it without another doorbell */
	if (qp->last_posted_snd_wqe) {
		INS_FLD_TO_BE(nds,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, nds);
		INS_FLD_TO_BE(1,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, f);
		INS_FLD_TO_BE(XDEV_NOPCODE_SEND,
			      &qp->last_posted_snd_wqe->next.next,
			      arbelprm_wqe_segment_next_st, nopcode);
	}

	rc = cmd_post_doorbell(&dbell, POST_SND_OFFSET);
	if (!rc) {
		/* remember this WQE so the next post can chain to it */
		qp->last_posted_snd_wqe = snd_wqe;
	}

	return rc;

}
1155
1156 static int create_mads_qp(void **qp_pp, void **snd_cq_pp, void **rcv_cq_pp)
1157 {
1158         __u8 i, next_i, j, k;
1159         int rc;
1160         struct udqp_st *qp;
1161         __u32 bus_addr;
1162         __u8 nds;
1163         void *ptr;
1164
1165         qp = &dev_ib_data.mads_qp;
1166
1167         /* set the pointer to the receive WQEs buffer */
1168         qp->rcv_wq = dev_buffers_p->mads_qp_rcv_queue;
1169
1170         qp->send_buf_sz = MAD_BUF_SZ;
1171         qp->rcv_buf_sz = MAD_BUF_SZ;
1172
1173         qp->max_recv_wqes = NUM_MADS_RCV_WQES;  /* max wqes in this work queue */
1174         qp->recv_wqe_cur_free = NUM_MADS_RCV_WQES;      /* current free wqes */
1175         qp->recv_wqe_alloc_idx = 0;     /* index from wqes can be allocated if there are free wqes */
1176
1177         qp->rcv_uar_context =
1178             dev_ib_data.uar_context_base + 8 * MADS_RCV_QP_DB_IDX;
1179         qp->send_uar_context =
1180             dev_ib_data.uar_context_base + 8 * MADS_SND_QP_DB_IDX;
1181
1182         memset(&qp->rcv_wq[0], 0, NUM_MADS_RCV_WQES * sizeof(qp->rcv_wq[0]));
1183         nds = sizeof(qp->rcv_wq[0].wqe) >> 4;
1184         /* iterrate through the list */
1185         for (j = 0, i = 0, next_i = 1;
1186              j < NUM_MADS_RCV_WQES;
1187              MOD_INC(i, NUM_MADS_RCV_WQES), MOD_INC(next_i, NUM_MADS_RCV_WQES),
1188              ++j) {
1189
1190                 qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
1191                 /* link the WQE to the next one */
1192                 bus_addr = virt_to_bus(&qp->rcv_wq[next_i].wqe);
1193                 ptr = qp->rcv_wq[i].wqe.control +
1194                     MT_BYTE_OFFSET(arbelprm_wqe_segment_ctrl_recv_st,
1195                                    wqe_segment_next);
1196                 INS_FLD(bus_addr >> 6, ptr, arbelprm_recv_wqe_segment_next_st,
1197                         nda_31_6);
1198                 INS_FLD(nds, ptr, arbelprm_recv_wqe_segment_next_st, nds);
1199
1200                 /* set the allocated buffers */
1201                 qp->rcv_bufs[i] = ib_buffers.rcv_mad_buf[i];
1202                 bus_addr = virt_to_bus(qp->rcv_bufs[i]);
1203                 qp->rcv_wq[i].wqe.mpointer[0].local_addr_l = bus_addr;
1204                 qp->rcv_wq[i].wqe.mpointer[0].byte_count = GRH_SIZE;
1205                 bus_addr = virt_to_bus(qp->rcv_bufs[i] + GRH_SIZE);
1206                 qp->rcv_wq[i].wqe.mpointer[1].local_addr_l = bus_addr;
1207                 qp->rcv_wq[i].wqe.mpointer[1].byte_count = MAD_BUF_SZ;
1208
1209                 for (k = 0; k < (((sizeof(qp->rcv_wq[i])) >> 4) - 1); ++k) {
1210                         qp->rcv_wq[i].wqe.mpointer[k].lkey = INVALID_WQE_LKEY;
1211                 }
1212         }
1213         cpu_to_be_buf(&qp->rcv_wq[0],
1214                       NUM_MADS_RCV_WQES * sizeof(qp->rcv_wq[0]));
1215
1216         for (i = 0; i < qp->max_recv_wqes; ++i) {
1217                 qp->rcv_wq[i].wqe_cont.qp = qp;
1218         }
1219
1220         /* set the pointer to the send WQEs buffer */
1221         qp->snd_wq = dev_buffers_p->mads_qp_snd_queue;
1222
1223         qp->snd_wqe_alloc_idx = 0;
1224         qp->max_snd_wqes = NUM_MADS_SND_WQES;
1225         qp->snd_wqe_cur_free = NUM_MADS_SND_WQES;
1226
1227         memset(&qp->snd_wq[0], 0, NUM_MADS_SND_WQES * sizeof(qp->snd_wq[i]));
1228         /* iterrate through the list */
1229         for (j = 0, i = 0, next_i = 1;
1230              j < NUM_MADS_RCV_WQES;
1231              MOD_INC(i, NUM_MADS_SND_WQES), MOD_INC(next_i, NUM_MADS_SND_WQES),
1232              ++j) {
1233
1234                 /* link the WQE to the next one */
1235                 bus_addr = virt_to_bus(&qp->snd_wq[next_i].wqe_cont.wqe);
1236                 INS_FLD(bus_addr >> 6, &qp->snd_wq[i].wqe_cont.wqe.next.next,
1237                         arbelprm_wqe_segment_next_st, nda_31_6);
1238
1239                 /* set the allocated buffers */
1240                 qp->snd_bufs[i] = ib_buffers.send_mad_buf[i];
1241                 bus_addr = virt_to_bus(qp->snd_bufs[i]);
1242                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].local_addr_l = bus_addr;
1243                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].lkey = dev_ib_data.mkey;
1244                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].byte_count =
1245                     qp->send_buf_sz;
1246
1247         }
1248
1249         cpu_to_be_buf(&qp->snd_wq[0],
1250                       NUM_MADS_SND_WQES * sizeof(qp->snd_wq[i]));
1251
1252         for (i = 0; i < qp->max_snd_wqes; ++i) {
1253                 qp->snd_wq[i].wqe_cont.qp = qp;
1254         }
1255
1256         /* qp number and cq numbers are already set up */
1257         qp->snd_cq.cq_buf = dev_buffers_p->mads_snd_cq_buf;
1258         qp->rcv_cq.cq_buf = dev_buffers_p->mads_rcv_cq_buf;
1259         qp->snd_cq.num_cqes = NUM_MADS_SND_CQES;
1260         qp->rcv_cq.num_cqes = NUM_MADS_RCV_CQES;
1261         qp->snd_cq.arm_db_ctx_idx = MADS_SND_CQ_ARM_DB_IDX;
1262         qp->snd_cq.ci_db_ctx_idx = MADS_SND_CQ_CI_DB_IDX;
1263         qp->rcv_cq.arm_db_ctx_idx = MADS_RCV_CQ_ARM_DB_IDX;
1264         qp->rcv_cq.ci_db_ctx_idx = MADS_RCV_CQ_CI_DB_IDX;
1265         qp->rcv_db_record_index = MADS_RCV_QP_DB_IDX;
1266         qp->snd_db_record_index = MADS_SND_QP_DB_IDX;
1267         qp->qkey = GLOBAL_QKEY;
1268         rc = create_udqp(qp);
1269         if (!rc) {
1270                 *qp_pp = qp;
1271                 *snd_cq_pp = &qp->snd_cq;
1272                 *rcv_cq_pp = &qp->rcv_cq;
1273         }
1274
1275         return rc;
1276 }
1277
1278 static int create_ipoib_qp(void **qp_pp,
1279                            void **snd_cq_pp, void **rcv_cq_pp, __u32 qkey)
1280 {
1281         __u8 i, next_i, j, k;
1282         int rc;
1283         struct udqp_st *qp;
1284         __u32 bus_addr;
1285         __u8 nds;
1286         void *ptr;
1287
1288         qp = &dev_ib_data.ipoib_qp;
1289
1290         /* set the pointer to the receive WQEs buffer */
1291         qp->rcv_wq = dev_buffers_p->ipoib_qp_rcv_queue;
1292
1293         qp->send_buf_sz = IPOIB_SND_BUF_SZ;
1294         qp->rcv_buf_sz = IPOIB_RCV_BUF_SZ;
1295
1296         qp->max_recv_wqes = NUM_IPOIB_RCV_WQES;
1297         qp->recv_wqe_cur_free = NUM_IPOIB_RCV_WQES;
1298
1299         qp->rcv_uar_context =
1300             dev_ib_data.uar_context_base + 8 * IPOIB_RCV_QP_DB_IDX;
1301         qp->send_uar_context =
1302             dev_ib_data.uar_context_base + 8 * IPOIB_SND_QP_DB_IDX;
1303
1304         memset(&qp->rcv_wq[0], 0, NUM_IPOIB_RCV_WQES * sizeof(qp->rcv_wq[0]));
1305         nds = sizeof(qp->rcv_wq[0].wqe) >> 4;
1306         /* iterrate through the list */
1307         for (j = 0, i = 0, next_i = 1;
1308              j < NUM_IPOIB_RCV_WQES;
1309              MOD_INC(i, NUM_IPOIB_RCV_WQES), MOD_INC(next_i,
1310                                                      NUM_IPOIB_RCV_WQES), ++j) {
1311
1312                 /* link the WQE to the next one */
1313                 bus_addr = virt_to_bus(&qp->rcv_wq[next_i].wqe);
1314                 ptr = qp->rcv_wq[i].wqe.control +
1315                     MT_BYTE_OFFSET(arbelprm_wqe_segment_ctrl_recv_st,
1316                                    wqe_segment_next);
1317                 INS_FLD(bus_addr >> 6, ptr, arbelprm_recv_wqe_segment_next_st,
1318                         nda_31_6);
1319                 INS_FLD(nds, ptr, arbelprm_recv_wqe_segment_next_st, nds);
1320
1321                 /* set the allocated buffers */
1322                 qp->rcv_bufs[i] = ib_buffers.ipoib_rcv_buf[i];
1323                 bus_addr = virt_to_bus(qp->rcv_bufs[i]);
1324                 qp->rcv_wq[i].wqe.mpointer[0].local_addr_l = bus_addr;
1325                 qp->rcv_wq[i].wqe.mpointer[0].byte_count = GRH_SIZE;
1326                 bus_addr = virt_to_bus(qp->rcv_bufs[i] + GRH_SIZE);
1327                 qp->rcv_wq[i].wqe.mpointer[1].local_addr_l = bus_addr;
1328                 qp->rcv_wq[i].wqe.mpointer[1].byte_count = IPOIB_RCV_BUF_SZ;
1329
1330                 for (k = 0; k < (((sizeof(qp->rcv_wq[i].wqe)) >> 4) - 1); ++k) {
1331                         qp->rcv_wq[i].wqe.mpointer[k].lkey = INVALID_WQE_LKEY;
1332                 }
1333         }
1334         cpu_to_be_buf(&qp->rcv_wq[0],
1335                       NUM_IPOIB_RCV_WQES * sizeof(qp->rcv_wq[0]));
1336
1337         for (i = 0; i < qp->max_recv_wqes; ++i) {
1338                 qp->rcv_wq[i].wqe_cont.qp = qp;
1339         }
1340
1341         /* set the pointer to the send WQEs buffer */
1342         qp->snd_wq = dev_buffers_p->ipoib_qp_snd_queue;
1343
1344         qp->snd_wqe_alloc_idx = 0;
1345         qp->max_snd_wqes = NUM_IPOIB_SND_WQES;
1346         qp->snd_wqe_cur_free = NUM_IPOIB_SND_WQES;
1347
1348         memset(&qp->snd_wq[0], 0, NUM_IPOIB_SND_WQES * sizeof(qp->snd_wq[i]));
1349         /* iterrate through the list */
1350         for (j = 0, i = 0, next_i = 1;
1351              j < NUM_IPOIB_RCV_WQES;
1352              MOD_INC(i, NUM_IPOIB_SND_WQES), MOD_INC(next_i,
1353                                                      NUM_IPOIB_SND_WQES), ++j) {
1354
1355                 /* link the WQE to the next one */
1356                 bus_addr = virt_to_bus(&qp->snd_wq[next_i].wqe_cont.wqe);
1357                 INS_FLD(bus_addr >> 6, &qp->snd_wq[i].wqe_cont.wqe.next.next,
1358                         arbelprm_wqe_segment_next_st, nda_31_6);
1359
1360                 /* set the allocated buffers */
1361                 qp->snd_bufs[i] = ib_buffers.send_ipoib_buf[i];
1362                 bus_addr = virt_to_bus(qp->snd_bufs[i]);
1363                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].local_addr_l = bus_addr;
1364                 qp->snd_wq[i].wqe_cont.wqe.mpointer[0].lkey = dev_ib_data.mkey;
1365
1366         }
1367         cpu_to_be_buf(&qp->snd_wq[0],
1368                       NUM_IPOIB_SND_WQES * sizeof(qp->snd_wq[i]));
1369
1370         for (i = 0; i < qp->max_snd_wqes; ++i) {
1371                 qp->snd_wq[i].wqe_cont.qp = qp;
1372         }
1373
1374         /* qp number and cq numbers are already set up */
1375         qp->snd_cq.cq_buf = dev_buffers_p->ipoib_snd_cq_buf;
1376         qp->rcv_cq.cq_buf = dev_buffers_p->ipoib_rcv_cq_buf;
1377         qp->snd_cq.num_cqes = NUM_IPOIB_SND_CQES;
1378         qp->rcv_cq.num_cqes = NUM_IPOIB_RCV_CQES;
1379         qp->snd_cq.arm_db_ctx_idx = IPOIB_SND_CQ_ARM_DB_IDX;
1380         qp->snd_cq.ci_db_ctx_idx = IPOIB_SND_CQ_CI_DB_IDX;
1381         qp->rcv_cq.arm_db_ctx_idx = IPOIB_RCV_CQ_ARM_DB_IDX;
1382         qp->rcv_cq.ci_db_ctx_idx = IPOIB_RCV_CQ_CI_DB_IDX;
1383         qp->rcv_db_record_index = IPOIB_RCV_QP_DB_IDX;
1384         qp->snd_db_record_index = IPOIB_SND_QP_DB_IDX;
1385         qp->qkey = qkey;
1386         rc = create_udqp(qp);
1387         if (!rc) {
1388                 *qp_pp = qp;
1389                 *snd_cq_pp = &qp->snd_cq;
1390                 *rcv_cq_pp = &qp->rcv_cq;
1391         }
1392
1393         return rc;
1394 }
1395
1396 static int create_udqp(struct udqp_st *qp)
1397 {
1398         int rc, ret = 0;
1399         void *inprm;
1400         struct recv_wqe_st *rcv_wqe;
1401
1402         inprm = dev_buffers_p->inprm_buf;
1403
1404         qp->rcv_cq.arm_db_ctx_pointer =
1405             dev_ib_data.uar_context_base + 8 * qp->rcv_cq.arm_db_ctx_idx;
1406         qp->rcv_cq.ci_db_ctx_pointer =
1407             dev_ib_data.uar_context_base + 8 * qp->rcv_cq.ci_db_ctx_idx;
1408         qp->snd_cq.arm_db_ctx_pointer =
1409             dev_ib_data.uar_context_base + 8 * qp->snd_cq.arm_db_ctx_idx;
1410         qp->snd_cq.ci_db_ctx_pointer =
1411             dev_ib_data.uar_context_base + 8 * qp->snd_cq.ci_db_ctx_idx;
1412
1413         /* create send CQ */
1414         init_cq_buf(qp->snd_cq.cq_buf, qp->snd_cq.num_cqes);
1415         qp->snd_cq.cons_counter = 0;
1416         prep_sw2hw_cq_buf(inprm,
1417                           dev_ib_data.eq.eqn,
1418                           qp->snd_cq.cqn,
1419                           qp->snd_cq.cq_buf,
1420                           qp->snd_cq.ci_db_ctx_idx, qp->snd_cq.arm_db_ctx_idx);
1421
1422         rc = cmd_sw2hw_cq(qp->snd_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
1423         if (rc) {
1424                 ret = -1;
1425                 eprintf("");
1426                 goto exit;
1427         }
1428
1429         /* create receive CQ */
1430         init_cq_buf(qp->rcv_cq.cq_buf, qp->rcv_cq.num_cqes);
1431         qp->rcv_cq.cons_counter = 0;
1432         memset(inprm, 0, SW2HW_CQ_IBUF_SZ);
1433         prep_sw2hw_cq_buf(inprm,
1434                           dev_ib_data.eq.eqn,
1435                           qp->rcv_cq.cqn,
1436                           qp->rcv_cq.cq_buf,
1437                           qp->rcv_cq.ci_db_ctx_idx, qp->rcv_cq.arm_db_ctx_idx);
1438
1439         rc = cmd_sw2hw_cq(qp->rcv_cq.cqn, inprm, SW2HW_CQ_IBUF_SZ);
1440         if (rc) {
1441                 ret = -1;
1442                 eprintf("");
1443                 goto undo_snd_cq;
1444         }
1445
1446         prep_rst2init_qpee_buf(inprm,
1447                                qp->snd_cq.cqn,
1448                                qp->rcv_cq.cqn,
1449                                qp->qkey,
1450                                my_log2(qp->max_recv_wqes),
1451                                my_log2(sizeof(qp->rcv_wq[0])) - 4,
1452                                my_log2(qp->max_snd_wqes),
1453                                my_log2(sizeof(qp->snd_wq[0])) - 4,
1454                                virt_to_bus(qp->snd_wq),
1455                                qp->snd_db_record_index,
1456                                virt_to_bus(qp->rcv_wq),
1457                                qp->rcv_db_record_index);
1458
1459         rc = cmd_rst2init_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
1460         if (rc) {
1461                 ret = -1;
1462                 eprintf("");
1463                 goto undo_rcv_cq;
1464         }
1465
1466         qp->last_posted_rcv_wqe = NULL;
1467         qp->last_posted_snd_wqe = NULL;
1468
1469         /* post all the buffers to the receive queue */
1470         while (1) {
1471                 /* allocate wqe */
1472                 rcv_wqe = alloc_rcv_wqe(qp);
1473                 if (!rcv_wqe)
1474                         break;
1475
1476                 /* post the buffer */
1477                 rc = post_rcv_buf(qp, rcv_wqe);
1478                 if (rc) {
1479                         ret = -1;
1480                         eprintf("");
1481                         goto undo_rcv_cq;
1482                 }
1483         }
1484
1485         prep_init2rtr_qpee_buf(inprm);
1486         rc = cmd_init2rtr_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
1487         if (rc) {
1488                 ret = -1;
1489                 eprintf("");
1490                 goto undo_rcv_cq;
1491         }
1492
1493         memset(inprm, 0, QPCTX_IBUF_SZ);
1494         rc = cmd_rtr2rts_qpee(qp->qpn, inprm, QPCTX_IBUF_SZ);
1495         if (rc) {
1496                 ret = -1;
1497                 eprintf("");
1498                 goto undo_rcv_cq;
1499         }
1500
1501         goto exit;
1502
1503       undo_rcv_cq:
1504         rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
1505         if (rc)
1506                 eprintf("");
1507
1508       undo_snd_cq:
1509         rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
1510         if (rc)
1511                 eprintf("");
1512
1513       exit:
1514         return ret;
1515 }
1516
1517 static int destroy_udqp(struct udqp_st *qp)
1518 {
1519         int rc;
1520
1521         rc = cmd_2err_qpee(qp->qpn);
1522         if (rc) {
1523                 eprintf("");
1524                 return rc;
1525         }
1526         tprintf("cmd_2err_qpee(0x%lx) success", qp->qpn);
1527
1528         rc = cmd_2rst_qpee(qp->qpn);
1529         if (rc) {
1530                 eprintf("");
1531                 return rc;
1532         }
1533         tprintf("cmd_2rst_qpee(0x%lx) success", qp->qpn);
1534
1535         rc = cmd_hw2sw_cq(qp->rcv_cq.cqn);
1536         if (rc) {
1537                 eprintf("");
1538                 return rc;
1539         }
1540         tprintf("cmd_hw2sw_cq(0x%lx) success", qp->snd_cq.cqn);
1541
1542         rc = cmd_hw2sw_cq(qp->snd_cq.cqn);
1543         if (rc) {
1544                 eprintf("");
1545                 return rc;
1546         }
1547         tprintf("cmd_hw2sw_cq(0x%lx) success", qp->rcv_cq.cqn);
1548
1549         return rc;
1550 }
1551
/*
 * Fill in the variable parts of a UD send WQE prior to posting it:
 * the control/next segments, the UD address-vector segment and the
 * first gather pointer's byte count.  The gather pointer's address
 * (local_addr_l) is assumed to have been set when the WQE was built.
 *
 * qph:    QP handle; currently unused.
 * avh:    address vector handle (struct ud_av_st *) describing the peer.
 * wqeh:   the send WQE to prepare (struct ud_send_wqe_st *).
 * buf:    if non-NULL, payload copied into the WQE's data buffer at
 *         'offset'; if NULL the caller already placed the data there
 *         and 'len' alone describes it.
 * offset: byte offset inside the data buffer at which 'buf' is copied.
 * len:    payload length in bytes (excluding 'offset').
 * e:      value for the WQE control segment's 'e' (event) bit.
 */
static void prep_send_wqe_buf(void *qph,
                              void *avh,
                              void *wqeh,
                              const void *buf,
                              unsigned int offset, __u16 len, __u8 e)
{
        struct ud_send_wqe_st *snd_wqe = wqeh;
        struct ud_av_st *av = avh;

        /* touch qph so the compiler does not warn about an unused
           parameter */
        if (qph) {
        }
        /* suppress warnings */
        INS_FLD_TO_BE(e, &snd_wqe->next.control,
                      arbelprm_wqe_segment_ctrl_send_st, e);
        INS_FLD_TO_BE(1, &snd_wqe->next.control,
                      arbelprm_wqe_segment_ctrl_send_st, always1);
        INS_FLD_TO_BE(1, &snd_wqe->next.next, arbelprm_wqe_segment_next_st,
                      always1);
        /* copy the pre-built address vector, then patch in the peer's
           QP number and qkey */
        memcpy(&snd_wqe->udseg, &av->av, sizeof av->av);
        INS_FLD_TO_BE(av->dest_qp, snd_wqe->udseg.av,
                      arbelprm_wqe_segment_ud_st, destination_qp);
        INS_FLD_TO_BE(av->qkey, snd_wqe->udseg.av, arbelprm_wqe_segment_ud_st,
                      q_key);

        if (buf) {
                /* local_addr_l is the bus address (big-endian) of the data
                   buffer; copy the payload behind any existing header */
                memcpy(bus_to_virt
                       (be32_to_cpu(snd_wqe->mpointer[0].local_addr_l)) +
                       offset, buf, len);
                len += offset;  /* total bytes covered by the gather entry */
        }
        snd_wqe->mpointer[0].byte_count = cpu_to_be32(len);
}
1584
1585 static void *alloc_ud_av(void)
1586 {
1587         u8 next_free;
1588
1589         if (dev_ib_data.udav.udav_next_free == FL_EOL) {
1590                 return NULL;
1591         }
1592
1593         next_free = dev_ib_data.udav.udav_next_free;
1594         dev_ib_data.udav.udav_next_free =
1595             dev_buffers_p->av_array[next_free].ud_av.next_free;
1596         tprintf("allocated udav %d", next_free);
1597         return &dev_buffers_p->av_array[next_free].ud_av;
1598 }
1599
1600 static void free_ud_av(void *avh)
1601 {
1602         union ud_av_u *avu;
1603         __u8 idx, old_idx;
1604         struct ud_av_st *av = avh;
1605
1606         avu = (union ud_av_u *)av;
1607
1608         idx = avu - dev_buffers_p->av_array;
1609         tprintf("freeing udav idx=%d", idx);
1610         old_idx = dev_ib_data.udav.udav_next_free;
1611         dev_ib_data.udav.udav_next_free = idx;
1612         avu->ud_av.next_free = old_idx;
1613 }
1614
/*
 * Publish the CQ's consumer counter to the hardware by storing it
 * (big-endian) into the CQ's consumer-index doorbell record in memory.
 * Always returns 0.
 */
static int update_cq_cons_idx(struct cq_st *cq)
{
        /* write doorbell record */
        WRITE_DWORD_VOL(cq->ci_db_ctx_pointer, 0, htonl(cq->cons_counter));

        /* equivalent field-by-field form, kept for reference:
           INS_FLD_TO_BE(cq->cons_counter,
           cq->ci_db_ctx_pointer,
           arbelprm_cq_arm_db_record_st,
           counter);

           INS_FLD_TO_BE(cq->cqn,
           cq->ci_db_ctx_pointer,
           arbelprm_cq_arm_db_record_st,
           cq_number);

           INS_FLD_TO_BE(1,
           cq->ci_db_ctx_pointer,
           arbelprm_cq_arm_db_record_st,
           res); */

        return 0;
}
1638
/*
 * Poll one completion from a CQ.
 *
 * cqh:      CQ handle (struct cq_st *).
 * cqe_p:    out - byte-swapped (CPU-order) copy of the consumed CQE.
 * num_cqes: out - 1 if a completion was consumed, 0 if the CQ was empty.
 *
 * Returns 0 on success or the error from updating the consumer index.
 */
static int poll_cq(void *cqh, union cqe_st *cqe_p, u8 * num_cqes)
{
        union cqe_st cqe;
        int rc;
        u32 *ptr;
        struct cq_st *cq = cqh;
        /* cons_counter is free-running; mask it down to a ring index
           (num_cqes is a power of two) */
        __u32 cons_idx = cq->cons_counter & (cq->num_cqes - 1);

        ptr = (u32 *) (&(cq->cq_buf[cons_idx]));
        barrier();
        /* MSB of the entry's 8th dword is the ownership bit; clear
           means the entry is owned by software and may be consumed */
        if ((ptr[7] & 0x80000000) == 0) {
                cqe = cq->cq_buf[cons_idx];
                be_to_cpu_buf(&cqe, sizeof(cqe));
                *cqe_p = cqe;
                /* hand the entry back to hardware ownership */
                ptr[7] = 0x80000000;
                barrier();
                cq->cons_counter++;
                rc = update_cq_cons_idx(cq);
                if (rc) {
                        return rc;
                }
                *num_cqes = 1;
        } else
                *num_cqes = 0;

        return 0;
}
1666
1667 static void dev2ib_cqe(struct ib_cqe_st *ib_cqe_p, union cqe_st *cqe_p)
1668 {
1669         __u8 opcode;
1670         __u32 wqe_addr_ba;
1671
1672         opcode =
1673             EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st, opcode);
1674         if (opcode >= CQE_ERROR_OPCODE)
1675                 ib_cqe_p->is_error = 1;
1676         else
1677                 ib_cqe_p->is_error = 0;
1678
1679         ib_cqe_p->is_send =
1680             EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st, s);
1681         wqe_addr_ba =
1682             EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st,
1683                    wqe_adr) << 6;
1684         ib_cqe_p->wqe = bus_to_virt(wqe_addr_ba);
1685
1686         ib_cqe_p->count =
1687             EX_FLD(cqe_p->good_cqe, arbelprm_completion_queue_entry_st,
1688                    byte_cnt);
1689 }
1690
/*
 * Poll a CQ and translate the completion into a struct ib_cqe_st.
 * On an error completion, the syndrome and the failing WQE are dumped
 * for debugging before returning.
 *
 * cqh:      CQ handle (struct cq_st *).
 * ib_cqe_p: out - translated completion (valid only when *num_cqes == 1).
 * num_cqes: out - number of completions consumed (0 or 1).
 *
 * Returns 0 on success (including "CQ empty"), non-zero on poll failure.
 */
static int ib_poll_cq(void *cqh, struct ib_cqe_st *ib_cqe_p, u8 * num_cqes)
{
        int rc;
        union cqe_st cqe;
        struct cq_st *cq = cqh;
        __u8 opcode;

        rc = poll_cq(cq, &cqe, num_cqes);
        if (rc || ((*num_cqes) == 0)) {
                return rc;
        }

        dev2ib_cqe(ib_cqe_p, &cqe);

        opcode =
            EX_FLD(cqe.good_cqe, arbelprm_completion_queue_entry_st, opcode);
        if (opcode >= CQE_ERROR_OPCODE) {
                /* error completion: report the syndrome and dump the WQE
                   it refers to */
                struct ud_send_wqe_st *wqe_p, wqe;
                __u32 *ptr;
                unsigned int i;

                /* wqe_addr is stored in units of 64 bytes */
                wqe_p =
                    bus_to_virt(EX_FLD
                                (cqe.error_cqe,
                                 arbelprm_completion_with_error_st,
                                 wqe_addr) << 6);
                eprintf("syndrome=0x%lx",
                        EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
                               syndrome));
                eprintf("vendor_syndrome=0x%lx",
                        EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
                               vendor_code));
                eprintf("wqe_addr=0x%lx", wqe_p);
                eprintf("myqpn=0x%lx",
                        EX_FLD(cqe.error_cqe, arbelprm_completion_with_error_st,
                               myqpn));
                /* take a byte-swapped local copy so the dump is in CPU order */
                memcpy(&wqe, wqe_p, sizeof wqe);
                be_to_cpu_buf(&wqe, sizeof wqe);

                eprintf("dumping wqe...");
                ptr = (__u32 *) (&wqe);
                for (i = 0; i < sizeof wqe; i += 4) {
                        printf("%lx : ", ptr[i >> 2]);
                }

        }

        return rc;
}
1740
/* always work on ipoib qp */
/*
 * Attach or detach the IPoIB QP to/from a multicast group.
 *
 * mcast_gid: the group's GID.
 * add:       written into the MGM entry's qp-valid ('qi') bit - non-zero
 *            attaches the QP, zero detaches it.
 *
 * The MGM entry is located by hashing the GID (MGID_HASH command) and
 * rewritten in full (WRITE_MGM) with only the first QP slot populated.
 * Returns 0 on success or the failing firmware command's status.
 */
static int add_qp_to_mcast_group(union ib_gid_u mcast_gid, __u8 add)
{
        void *mg;
        __u8 *tmp;
        int rc;
        __u16 mgid_hash;
        void *mgmqp_p;

        /* hash command wants the GID byte-swapped in the input mailbox */
        tmp = dev_buffers_p->inprm_buf;
        memcpy(tmp, mcast_gid.raw, 16);
        be_to_cpu_buf(tmp, 16);
        rc = cmd_mgid_hash(tmp, &mgid_hash);
        if (!rc) {
                /* build a fresh MGM entry in the (reused) input mailbox */
                mg = (void *)dev_buffers_p->inprm_buf;
                memset(mg, 0, MT_STRUCT_SIZE(arbelprm_mgm_entry_st));
                INS_FLD(mcast_gid.as_u32.dw[0], mg, arbelprm_mgm_entry_st,
                        mgid_128_96);
                INS_FLD(mcast_gid.as_u32.dw[1], mg, arbelprm_mgm_entry_st,
                        mgid_95_64);
                INS_FLD(mcast_gid.as_u32.dw[2], mg, arbelprm_mgm_entry_st,
                        mgid_63_32);
                INS_FLD(mcast_gid.as_u32.dw[3], mg, arbelprm_mgm_entry_st,
                        mgid_31_0);
                /* swap the 16 GID bytes into the byte order the command
                   interface expects */
                be_to_cpu_buf(mg +
                              MT_BYTE_OFFSET(arbelprm_mgm_entry_st,
                                             mgid_128_96), 16);
                /* fill the first QP slot with the IPoIB QP */
                mgmqp_p = mg + MT_BYTE_OFFSET(arbelprm_mgm_entry_st, mgmqp_0);
                INS_FLD(dev_ib_data.ipoib_qp.qpn, mgmqp_p, arbelprm_mgmqp_st,
                        qpn_i);
                INS_FLD(add, mgmqp_p, arbelprm_mgmqp_st, qi);
                rc = cmd_write_mgm(mg, mgid_hash);
        }
        return rc;
}
1776
/*
 * Acknowledge the HCA interrupt by writing the clear-interrupt value
 * to the clear-interrupt register (both captured at device init).
 * Always returns 0.
 */
static int clear_interrupt(void)
{
        writel(dev_ib_data.clr_int_data, dev_ib_data.clr_int_addr);
        return 0;
}
1782
1783 static struct ud_send_wqe_st *alloc_send_wqe(udqp_t qph)
1784 {
1785         struct udqp_st *qp = qph;
1786         __u32 idx;
1787
1788         if (qp->snd_wqe_cur_free) {
1789                 qp->snd_wqe_cur_free--;
1790                 idx = qp->snd_wqe_alloc_idx;
1791                 qp->snd_wqe_alloc_idx =
1792                     (qp->snd_wqe_alloc_idx + 1) & (qp->max_snd_wqes - 1);
1793                 return &qp->snd_wq[idx].wqe_cont.wqe;
1794         }
1795
1796         return NULL;
1797 }
1798
1799 static struct recv_wqe_st *alloc_rcv_wqe(struct udqp_st *qp)
1800 {
1801         __u32 idx;
1802
1803         if (qp->recv_wqe_cur_free) {
1804                 qp->recv_wqe_cur_free--;
1805                 idx = qp->recv_wqe_alloc_idx;
1806                 qp->recv_wqe_alloc_idx =
1807                     (qp->recv_wqe_alloc_idx + 1) & (qp->max_recv_wqes - 1);
1808                 return &qp->rcv_wq[idx].wqe_cont.wqe;
1809         }
1810
1811         return NULL;
1812 }
1813
1814 static int free_send_wqe(struct ud_send_wqe_st *wqe)
1815 {
1816         struct udqp_st *qp = ((struct ude_send_wqe_cont_st *)wqe)->qp;
1817         qp->snd_wqe_cur_free++;
1818
1819         return 0;
1820 }
1821
1822 static int free_rcv_wqe(struct recv_wqe_st *wqe)
1823 {
1824         struct udqp_st *qp = ((struct recv_wqe_cont_st *)wqe)->qp;
1825         qp->recv_wqe_cur_free++;
1826
1827         return 0;
1828 }
1829
1830 static int free_wqe(void *wqe)
1831 {
1832         int rc = 0;
1833         struct recv_wqe_st *rcv_wqe;
1834
1835 //      tprintf("free wqe= 0x%x", wqe);
1836         if ((wqe >= (void *)(dev_ib_data.ipoib_qp.rcv_wq)) &&
1837             (wqe <
1838              (void *)(&dev_ib_data.ipoib_qp.rcv_wq[NUM_IPOIB_RCV_WQES]))) {
1839                 /* ipoib receive wqe */
1840                 free_rcv_wqe(wqe);
1841                 rcv_wqe = alloc_rcv_wqe(&dev_ib_data.ipoib_qp);
1842                 if (rcv_wqe) {
1843                         rc = post_rcv_buf(&dev_ib_data.ipoib_qp, rcv_wqe);
1844                         if (rc) {
1845                                 eprintf("");
1846                         }
1847                 }
1848         } else if (wqe >= (void *)(dev_ib_data.ipoib_qp.snd_wq) &&
1849                    wqe <
1850                    (void *)(&dev_ib_data.ipoib_qp.snd_wq[NUM_IPOIB_SND_WQES])) {
1851                 /* ipoib send wqe */
1852                 free_send_wqe(wqe);
1853         } else if (wqe >= (void *)(dev_ib_data.mads_qp.rcv_wq) &&
1854                    wqe <
1855                    (void *)(&dev_ib_data.mads_qp.rcv_wq[NUM_MADS_RCV_WQES])) {
1856                 /* mads receive wqe */
1857                 free_rcv_wqe(wqe);
1858                 rcv_wqe = alloc_rcv_wqe(&dev_ib_data.mads_qp);
1859                 if (rcv_wqe) {
1860                         rc = post_rcv_buf(&dev_ib_data.mads_qp, rcv_wqe);
1861                         if (rc) {
1862                                 eprintf("");
1863                         }
1864                 }
1865         } else if (wqe >= (void *)(dev_ib_data.mads_qp.snd_wq) &&
1866                    wqe <
1867                    (void *)(&dev_ib_data.mads_qp.snd_wq[NUM_MADS_SND_WQES])) {
1868                 /* mads send wqe */
1869                 free_send_wqe(wqe);
1870         } else {
1871                 rc = -1;
1872                 eprintf("");
1873         }
1874
1875         return rc;
1876 }
1877
/*
 * Publish the EQ's consumer counter to the hardware's consumer-index
 * doorbell address.  Always returns 0.
 */
static int update_eq_cons_idx(struct eq_st *eq)
{
        writel(eq->cons_counter, eq->ci_base_base_addr);
        return 0;
}
1883
1884 static void dev2ib_eqe(struct ib_eqe_st *ib_eqe_p, struct eqe_t *eqe_p)
1885 {
1886         void *tmp;
1887
1888         ib_eqe_p->event_type =
1889             EX_FLD(eqe_p, arbelprm_event_queue_entry_st, event_type);
1890
1891         tmp = eqe_p + MT_BYTE_OFFSET(arbelprm_event_queue_entry_st, event_data);
1892         ib_eqe_p->cqn = EX_FLD(tmp, arbelprm_completion_event_st, cqn);
1893 }
1894
1895 static int poll_eq(struct ib_eqe_st *ib_eqe_p, __u8 * num_eqes)
1896 {
1897         struct eqe_t eqe;
1898         u8 owner;
1899         int rc;
1900         u32 *ptr;
1901         struct eq_st *eq = &dev_ib_data.eq;
1902         __u32 cons_idx = eq->cons_counter & (eq->eq_size - 1);
1903
1904         ptr = (u32 *) (&(eq->eq_buf[cons_idx]));
1905         owner = (ptr[7] & 0x80000000) ? OWNER_HW : OWNER_SW;
1906         if (owner == OWNER_SW) {
1907                 eqe = eq->eq_buf[cons_idx];
1908                 be_to_cpu_buf(&eqe, sizeof(eqe));
1909                 dev2ib_eqe(ib_eqe_p, &eqe);
1910                 ptr[7] |= 0x80000000;
1911                 eq->eq_buf[cons_idx] = eqe;
1912                 eq->cons_counter++;
1913                 rc = update_eq_cons_idx(eq);
1914                 if (rc) {
1915                         return -1;
1916                 }
1917                 *num_eqes = 1;
1918         } else {
1919                 *num_eqes = 0;
1920         }
1921         return 0;
1922 }
1923
/*
 * Release the device mappings taken at initialization: unmap the UAR
 * and CR-space PCI BARs of the memfree device.  Always returns 0.
 */
static int ib_device_close(void)
{
        iounmap(memfree_pci_dev.uar);
        iounmap(memfree_pci_dev.cr_space);
        return 0;
}
1930
1931 static __u32 dev_get_qpn(void *qph)
1932 {
1933         struct udqp_st *qp = qph;
1934
1935         return qp->qpn;
1936 }
1937
/*
 * Ring a doorbell: write the two dwords at 'dbell' (converted to
 * big-endian) into the UAR page at the given byte offset.  The
 * barrier() between the two MMIO writes keeps them in program order.
 */
static void dev_post_dbell(void *dbell, __u32 offset)
{
        __u32 *ptr;
        unsigned long address;

        ptr = dbell;

        /* sanity check on byte 3 of the first dword
           (NOTE(review): presumably a fixed opcode/format field that
           must be 1 - confirm against the doorbell layout) */
        if (((ptr[0] >> 24) & 0xff) != 1) {
                eprintf("");
        }
        tprintf("ptr[0]= 0x%lx", ptr[0]);
        tprintf("ptr[1]= 0x%lx", ptr[1]);
        address = (unsigned long)(memfree_pci_dev.uar) + offset;
        tprintf("va=0x%lx pa=0x%lx", address,
                virt_to_bus((const void *)address));
        /* first dword, then - strictly after it - the second dword */
        writel(htonl(ptr[0]), memfree_pci_dev.uar + offset);
        barrier();
        address += 4;
        tprintf("va=0x%lx pa=0x%lx", address,
                virt_to_bus((const void *)address));
        writel(htonl(ptr[1]), address /*memfree_pci_dev.uar + offset + 4 */ );
}