[HW, TOOLS] fixed a bug, causing incorrect value of max_addr_handles in query_ca...
[mirror/winof/.git] / hw / mthca / kernel / mthca_cmd.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  * $Id$
34  */
35
36 #include <ib_mad.h>
37
38 #include "mthca_dev.h"
39 #if defined(EVENT_TRACING)
40 #ifdef offsetof
41 #undef offsetof
42 #endif
43 #include "mthca_cmd.tmh"
44 #endif
45 #include "mthca_config_reg.h"
46 #include "mthca_cmd.h"
47 #include "mthca_memfree.h"
48
/* Token written into the HCR for polled (non-event) commands. */
#define CMD_POLL_TOKEN 0xffff

/*
 * Byte offsets of fields within the Host Command Register (HCR),
 * plus bit positions inside the seventh (dword 6) word.
 */
enum {
	HCR_IN_PARAM_OFFSET    = 0x00,
	HCR_IN_MODIFIER_OFFSET = 0x08,
	HCR_OUT_PARAM_OFFSET   = 0x0c,
	HCR_TOKEN_OFFSET       = 0x14,
	HCR_STATUS_OFFSET      = 0x18,

	HCR_OPMOD_SHIFT        = 12,	/* opcode modifier bit position */
	HCA_E_BIT              = 22,	/* 'event' bit: complete via EQ interrupt */
	HCR_GO_BIT             = 23	/* set by SW to submit; cleared by FW when done */
};

/* FW command opcodes written into the HCR together with the GO bit. */
enum {
	/* initialization and general commands */
	CMD_SYS_EN          = 0x1,
	CMD_SYS_DIS         = 0x2,
	CMD_MAP_FA          = 0xfff,
	CMD_UNMAP_FA        = 0xffe,
	CMD_RUN_FW          = 0xff6,
	CMD_MOD_STAT_CFG    = 0x34,
	CMD_QUERY_DEV_LIM   = 0x3,
	CMD_QUERY_FW        = 0x4,
	CMD_ENABLE_LAM      = 0xff8,
	CMD_DISABLE_LAM     = 0xff7,
	CMD_QUERY_DDR       = 0x5,
	CMD_QUERY_ADAPTER   = 0x6,
	CMD_INIT_HCA        = 0x7,
	CMD_CLOSE_HCA       = 0x8,
	CMD_INIT_IB         = 0x9,
	CMD_CLOSE_IB        = 0xa,
	CMD_QUERY_HCA       = 0xb,
	CMD_SET_IB          = 0xc,
	CMD_ACCESS_DDR      = 0x2e,
	CMD_MAP_ICM         = 0xffa,
	CMD_UNMAP_ICM       = 0xff9,
	CMD_MAP_ICM_AUX     = 0xffc,
	CMD_UNMAP_ICM_AUX   = 0xffb,
	CMD_SET_ICM_SIZE    = 0xffd,

	/* TPT commands */
	CMD_SW2HW_MPT       = 0xd,
	CMD_QUERY_MPT       = 0xe,
	CMD_HW2SW_MPT       = 0xf,
	CMD_READ_MTT        = 0x10,
	CMD_WRITE_MTT       = 0x11,
	CMD_SYNC_TPT        = 0x2f,

	/* EQ commands */
	CMD_MAP_EQ          = 0x12,
	CMD_SW2HW_EQ        = 0x13,
	CMD_HW2SW_EQ        = 0x14,
	CMD_QUERY_EQ        = 0x15,

	/* CQ commands */
	CMD_SW2HW_CQ        = 0x16,
	CMD_HW2SW_CQ        = 0x17,
	CMD_QUERY_CQ        = 0x18,
	CMD_RESIZE_CQ       = 0x2c,

	/* SRQ commands */
	CMD_SW2HW_SRQ       = 0x35,
	CMD_HW2SW_SRQ       = 0x36,
	CMD_QUERY_SRQ       = 0x37,
	CMD_ARM_SRQ         = 0x40,

	/* QP/EE commands */
	CMD_RST2INIT_QPEE   = 0x19,
	CMD_INIT2RTR_QPEE   = 0x1a,
	CMD_RTR2RTS_QPEE    = 0x1b,
	CMD_RTS2RTS_QPEE    = 0x1c,
	CMD_SQERR2RTS_QPEE  = 0x1d,
	CMD_2ERR_QPEE       = 0x1e,
	CMD_RTS2SQD_QPEE    = 0x1f,
	CMD_SQD2SQD_QPEE    = 0x38,
	CMD_SQD2RTS_QPEE    = 0x20,
	CMD_ERR2RST_QPEE    = 0x21,
	CMD_QUERY_QPEE      = 0x22,
	CMD_INIT2INIT_QPEE  = 0x2d,
	CMD_SUSPEND_QPEE    = 0x32,
	CMD_UNSUSPEND_QPEE  = 0x33,
	/* special QPs and management commands */
	CMD_CONF_SPECIAL_QP = 0x23,
	CMD_MAD_IFC         = 0x24,

	/* multicast commands */
	CMD_READ_MGM        = 0x25,
	CMD_WRITE_MGM       = 0x26,
	CMD_MGID_HASH       = 0x27,

	/* miscellaneous commands */
	CMD_DIAG_RPRT       = 0x30,
	CMD_NOP             = 0x31,

	/* debug commands */
	CMD_QUERY_DEBUG_MSG = 0x2a,
	CMD_SET_DEBUG_MSG   = 0x2b,
};

/*
 * According to Mellanox code, FW may be starved and never complete
 * commands.  So we can't use strict timeouts described in PRM -- we
 * just arbitrarily select 60 seconds for now.
 */
#define CMD_POLL_N_TRIES		60

/* All three PRM timeout classes collapse to the same 60 s (see above). */
enum {
	CMD_TIME_CLASS_A = 60 * HZ,
	CMD_TIME_CLASS_B = 60 * HZ,
	CMD_TIME_CLASS_C = 60 * HZ
};

/* Time budget for waiting on the GO bit before posting a command. */
enum {
	GO_BIT_TIMEOUT = 10 * HZ
};

#define GO_BIT_N_TRIES		5
#define GO_BIT_STALL_TIMEOUT		((GO_BIT_TIMEOUT/HZ)/GO_BIT_N_TRIES)		/* usecs */
168
/* Per-command bookkeeping used when commands complete via EQ events. */
struct mthca_cmd_context {
	KEVENT	event;		/* signaled by mthca_cmd_event() on completion */
	int	result;		/* set to 0 by mthca_cmd_event() when a completion arrives */
	int	next;		/* index of next free context; -1 terminates the free list */
	u64	out_param;	/* immediate output reported by FW */
	u16	token;		/* matches an EQ completion back to this context */
	u8	status;		/* FW status byte of the completed command */
};
177
178 static inline int go_bit(struct mthca_dev *dev)
179 {
180         return readl(dev->hcr + HCR_STATUS_OFFSET) &
181                 _byteswap_ulong(1 << HCR_GO_BIT);
182 }
183
184 /* 
185 *       Function: performs busy-wait loop, while polling GO bit
186 *       Return: 0 when GO bit was extinguished in time 
187 */
188 static int poll_go_bit(struct mthca_dev *dev)
189 {
190         int i=0; /* init must be here !*/
191         
192         if (!go_bit(dev)) 
193                 return 0;
194
195         for (; i<GO_BIT_N_TRIES; i++) {
196                 /* Nope, stall for a little bit and try again. */
197                 KeStallExecutionProcessor( GO_BIT_STALL_TIMEOUT );
198                 if (!go_bit(dev))
199                         return 0;
200         }               
201         
202         return 1;
203 }
204
/* 
* Function: put thread on hold, while polling GO bit
* Return: 0 when GO bit was extinguished within timeout_usecs, 1 otherwise
* Note: makes roughly CMD_POLL_N_TRIES polls over the timeout period
*/
static int wait_go_bit(struct mthca_dev *dev, unsigned long timeout_usecs)
{
#ifdef USE_FAIR_GO_BIT_POLLING	
//
// the algorithm polls 'go bit'  N_POLL_TRIES times with a polling interval,
// increasing from 0 to MAX_POLL_INTERVAL with step of POLL_INTERVAL_DELTA
//
// The constants below were chosen arbitrarily.
// They require eventual tuning, for which reason the algorithm is disabled for now.

		int i = 0;
/* negative KeDelayExecutionThread intervals are relative times in 100 ns units */
#define POLL_INTERVAL_DELTA		5 *(-10)	// 5 usec
#define MAX_POLL_INTERVAL			200 *(-10)	// 200 usec
#define N_POLL_TRIES				40
#endif
	u64 start, end;
	LARGE_INTEGER  interval;

	/* fast path: FW already released the HCR */
	if (!go_bit(dev))	return 0;

	/* QuadPart == 0: KeDelayExecutionThread just yields the quantum */
	interval.QuadPart = 0;
	start = cl_get_time_stamp();
	end = start + timeout_usecs;
	while (go_bit(dev) && (cl_get_time_stamp() < end)) {
		KeDelayExecutionThread( KernelMode, FALSE, &interval );
#ifdef USE_FAIR_GO_BIT_POLLING	
		/* every N_POLL_TRIES polls, grow the (negative, relative) delay
		 * magnitude until it reaches MAX_POLL_INTERVAL */
		if (++i >= N_POLL_TRIES) {
			if ( (__int64)interval.QuadPart > (__int64)MAX_POLL_INTERVAL)
				interval.QuadPart += POLL_INTERVAL_DELTA;
			i = 0;
		}
#endif		
	}

	if (!go_bit(dev))	return 0;
	return 1;	
}
247
248
/*
 * Post one command to the HCR.  For event-driven submission the GO bit
 * must first be waited out (FW owns the HCR while it is set); polled
 * submission is already serialized by poll_sem so no wait is needed.
 * Returns 0 on success, -EAGAIN if the HCR stayed busy.
 */
static int mthca_cmd_post(struct mthca_dev *dev,
			  u64 in_param,
			  u64 out_param,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  u16 token,
			  int event)
{
	int err = 0;

	/* the HCR is a single shared hardware resource -- serialize writers */
	down(&dev->cmd.hcr_mutex);

	if (event && wait_go_bit(dev,GO_BIT_TIMEOUT)) {
		err = -EAGAIN;
		goto out;
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	/* HCR dwords 0-5: in_param hi/lo, in_modifier, out_param hi/lo, token */
	__raw_writel((u32) cl_hton32((u32)(in_param >> 32)),           (u8 *)dev->hcr + 0 * 4);
	__raw_writel((u32) cl_hton32((u32)(in_param & 0xfffffffful)), (u8 *) dev->hcr + 1 * 4);
	__raw_writel((u32) cl_hton32(in_modifier),              (u8 *)dev->hcr + 2 * 4);
	__raw_writel((u32) cl_hton32((u32)(out_param >> 32)),          (u8 *)dev->hcr + 3 * 4);
	__raw_writel((u32) cl_hton32((u32)(out_param & 0xfffffffful)), (u8 *)dev->hcr + 4 * 4);
	__raw_writel((u32) cl_hton32(token << 16),              (u8 *)dev->hcr + 5 * 4);

	/* __raw_writel may not order writes. */
	wmb();

	/* dword 6 goes last: setting the GO bit hands the HCR over to FW */
	__raw_writel((u32) cl_hton32((1 << HCR_GO_BIT)                |
					       (event ? (1 << HCA_E_BIT) : 0)   |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op),                       (u8 *)dev->hcr + 6 * 4);

out:
	up(&dev->cmd.hcr_mutex);
	return err;
}
292
293
294 static int mthca_cmd_poll(struct mthca_dev *dev,
295                           u64 in_param,
296                           u64 *out_param,
297                           int out_is_imm,
298                           u32 in_modifier,
299                           u8 op_modifier,
300                           u16 op,
301                           unsigned long timeout,
302                           u8 *status)
303 {
304         int err = 0;
305
306         sem_down(&dev->cmd.poll_sem);
307
308         err = mthca_cmd_post(dev, in_param,
309                              out_param ? *out_param : 0,
310                              in_modifier, op_modifier,
311                              op, CMD_POLL_TOKEN, 0);
312         if (err)
313                 goto out;
314
315         if (wait_go_bit(dev,timeout)) {
316                 err = -EBUSY;
317                 goto out;
318         }
319         
320         if (out_is_imm)
321                 *out_param = 
322                         (u64) cl_ntoh32((__be32)
323                                           __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
324                         (u64) cl_ntoh32((__be32)
325                                           __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
326
327         *status = (u8)(cl_ntoh32((__be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24);
328         if (*status)
329                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n",
330                           op, *status));
331
332 out:
333         sem_up(&dev->cmd.poll_sem);
334         return err;
335 }
336
337 void mthca_cmd_event(struct mthca_dev *dev,
338                      u16 token,
339                      u8  status,
340                      u64 out_param)
341 {
342         struct mthca_cmd_context *context =
343                 &dev->cmd.context[token & dev->cmd.token_mask];
344
345         /* previously timed out command completing at long last */
346         if (token != context->token)
347                 return;
348
349         context->result    = 0;
350         context->status    = status;
351         context->out_param = out_param;
352
353         context->token += dev->cmd.token_mask + 1;
354
355         ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
356         KeSetEvent( &context->event, 0, FALSE );
357 }
358
/*
 * Execute a FW command in event mode: take a free command context,
 * post the command tagged with the context's token, then sleep on the
 * context's event until mthca_cmd_event() signals completion or the
 * timeout expires.  event_sem bounds the number of in-flight commands
 * to max_cmds.
 */
static int mthca_cmd_wait(struct mthca_dev *dev,
			  u64 in_param,
			  u64 *out_param,
			  int out_is_imm,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  unsigned long timeout,
			  u8 *status)
{
	int err = 0;
	struct mthca_cmd_context *context;
	SPIN_LOCK_PREP(lh);

	/* reserve one of the max_cmds outstanding-command slots */
	sem_down(&dev->cmd.event_sem);

	/* pop a context off the free list */
	spin_lock( &dev->cmd.context_lock, &lh );
	BUG_ON(dev->cmd.free_head < 0);
	context = &dev->cmd.context[dev->cmd.free_head];
	dev->cmd.free_head = context->next;
	spin_unlock( &lh );

	/* arm the completion event before posting, to avoid a lost wakeup */
	KeClearEvent(	&context->event );
	err = mthca_cmd_post(dev, in_param,
			     out_param ? *out_param : 0,
			     in_modifier, op_modifier,
			     op, context->token, 1);
	if (err) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW,
			("mthca_cmd_wait: Command %02x completed with err %02x\n", op, err));
		goto out;
	}

	{
		NTSTATUS res;
		LARGE_INTEGER  interval;
		/* negative == relative time; timeout is usecs, KeWait* wants 100 ns units */
		interval.QuadPart = (-10)* (__int64)timeout;
		res = KeWaitForSingleObject( &context->event, Executive, KernelMode, FALSE,  &interval );
		if (res != STATUS_SUCCESS) {
			/* timeout: the completion (if it ever arrives) will be
			 * discarded by the token check in mthca_cmd_event() */
			err = -EBUSY;
			HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW,
				("mthca_cmd_wait: Command %02x completed with err %02x\n", op, err));
			goto out;
		}
	}

	*status = context->status;
	if (*status)
		HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n",
			  op, *status));

	if (out_is_imm)
		*out_param = context->out_param;

out:
	/* push the context back onto the free list, even on failure */
	spin_lock(&dev->cmd.context_lock, &lh);
	context->next = dev->cmd.free_head;
	dev->cmd.free_head = (int)(context - dev->cmd.context);
	spin_unlock(&lh);

	sem_up( &dev->cmd.event_sem );

	return err;
}
423
424 /* Invoke a command with an output mailbox */
425 static int mthca_cmd_box(struct mthca_dev *dev,
426                          u64 in_param,
427                          u64 out_param,
428                          u32 in_modifier,
429                          u8 op_modifier,
430                          u16 op,
431                          unsigned long timeout,
432                          u8 *status)
433 {
434         if (dev->cmd.use_events)
435                 return mthca_cmd_wait(dev, in_param, &out_param, 0,
436                                       in_modifier, op_modifier, op,
437                                       timeout, status);
438         else
439                 return mthca_cmd_poll(dev, in_param, &out_param, 0,
440                                       in_modifier, op_modifier, op,
441                                       timeout, status);
442 }
443
/* Invoke a command with no output parameter */
static int mthca_cmd(struct mthca_dev *dev,
		     u64 in_param,
		     u32 in_modifier,
		     u8 op_modifier,
		     u16 op,
		     unsigned long timeout,
		     u8 *status)
{
	/* out_param 0 is harmless: cmd_box passes out_is_imm == 0 */
	return mthca_cmd_box(dev, in_param, 0, in_modifier,
			     op_modifier, op, timeout, status);
}
456
457 /*
458  * Invoke a command with an immediate output parameter (and copy the
459  * output into the caller's out_param pointer after the command
460  * executes).
461  */
462 static int mthca_cmd_imm(struct mthca_dev *dev,
463                          u64 in_param,
464                          u64 *out_param,
465                          u32 in_modifier,
466                          u8 op_modifier,
467                          u16 op,
468                          unsigned long timeout,
469                          u8 *status)
470 {
471         if (dev->cmd.use_events)
472                 return mthca_cmd_wait(dev, in_param, out_param, 1,
473                                       in_modifier, op_modifier, op,
474                                       timeout, status);
475         else
476                 return mthca_cmd_poll(dev, in_param, out_param, 1,
477                                       in_modifier, op_modifier, op,
478                                       timeout, status);
479 }
480
481 int mthca_cmd_init(struct mthca_dev *dev)
482 {
483         KeInitializeMutex(&dev->cmd.hcr_mutex, 0);
484         sem_init(&dev->cmd.poll_sem, 1, 1);
485         dev->cmd.use_events = 0;
486
487         dev->hcr = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_HCR) + MTHCA_HCR_BASE,
488                            MTHCA_HCR_SIZE, &dev->hcr_size);
489         if (!dev->hcr) {
490                 HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map command register."));
491                 return -ENOMEM;
492         }
493
494         dev->cmd.pool = pci_pool_create("mthca_cmd", dev,
495                                         MTHCA_MAILBOX_SIZE,
496                                         MTHCA_MAILBOX_SIZE, 0);
497         if (!dev->cmd.pool) {
498                 iounmap(dev->hcr, dev->hcr_size);
499                 return -ENOMEM;
500         }
501
502         return 0;
503 }
504
/* Undo mthca_cmd_init(): destroy the mailbox pool and unmap the HCR. */
void mthca_cmd_cleanup(struct mthca_dev *dev)
{
	pci_pool_destroy(dev->cmd.pool);
	iounmap(dev->hcr, dev->hcr_size);
}
510
511 /*
512  * Switch to using events to issue FW commands (should be called after
513  * event queue to command events has been initialized).
514  */
515 int mthca_cmd_use_events(struct mthca_dev *dev)
516 {
517         int i;
518
519         dev->cmd.context = kmalloc(dev->cmd.max_cmds *
520                                    sizeof (struct mthca_cmd_context),
521                                    GFP_KERNEL);
522         if (!dev->cmd.context)
523                 return -ENOMEM;
524
525         for (i = 0; i < dev->cmd.max_cmds; ++i) {
526                 dev->cmd.context[i].token = (u16)i;
527                 dev->cmd.context[i].next = i + 1;
528         KeInitializeEvent(      &dev->cmd.context[i].event, NotificationEvent , FALSE );
529         }
530
531         dev->cmd.context[dev->cmd.max_cmds - 1].next = -1;
532         dev->cmd.free_head = 0;
533
534         sem_init(&dev->cmd.event_sem, dev->cmd.max_cmds, LONG_MAX);
535         spin_lock_init(&dev->cmd.context_lock);
536
537         for (dev->cmd.token_mask = 1;
538              dev->cmd.token_mask < dev->cmd.max_cmds;
539              dev->cmd.token_mask <<= 1)
540                 ; /* nothing */
541         --dev->cmd.token_mask;
542
543         dev->cmd.use_events = 1;
544         sem_down(&dev->cmd.poll_sem);
545
546         return 0;
547 }
548
549 /*
550  * Switch back to polling (used when shutting down the device)
551  */
552 void mthca_cmd_use_polling(struct mthca_dev *dev)
553 {
554         int i;
555
556         dev->cmd.use_events = 0;
557
558         for (i = 0; i < dev->cmd.max_cmds; ++i)
559                 sem_down(&dev->cmd.event_sem);
560
561         kfree(dev->cmd.context);
562
563         sem_up(&dev->cmd.poll_sem);
564 }
565
566 struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
567                                           unsigned int gfp_mask)
568 {
569         struct mthca_mailbox *mailbox;
570
571         mailbox = kmalloc(sizeof *mailbox, gfp_mask);
572         if (!mailbox)
573                 return ERR_PTR(-ENOMEM);
574
575         mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
576         if (!mailbox->buf) {
577                 kfree(mailbox);
578                 return ERR_PTR(-ENOMEM);
579         }
580
581         return mailbox;
582 }
583
584 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
585 {
586         if (!mailbox)
587                 return;
588
589         pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
590         kfree(mailbox);
591 }
592
593 int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
594 {
595         u64 out;
596         int ret;
597
598         ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status);
599
600         if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR)
601                 HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SYS_EN DDR error: syn=%x, sock=%d, "
602                            "sladdr=%d, SPD source=%s\n",
603                            (int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
604                            (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM"));
605
606         return ret;
607 }
608
/* SYS_DIS - disable the HCA (inverse of SYS_EN). */
int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status);
}
613
/*
 * Shared worker for MAP_FA / MAP_ICM / MAP_ICM_AUX: walk an ICM chunk
 * list and hand its physical ranges to FW through mailbox-sized
 * batches of (virtual, physical|log2size) entry pairs.
 * virt == -1 means the op takes no virtual addresses (MAP_FA et al.);
 * otherwise virt advances as pages are mapped.
 * Returns 0 or a negative error; FW status via *status.
 */
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
			 u64 virt, u8 *status)
{
	struct mthca_mailbox *mailbox;
	struct mthca_icm_iter iter;
	__be64 *pages;		/* mailbox viewed as an array of BE64 entry pairs */
	int lg;			/* log2 of the current mapping unit */
	int nent = 0;		/* entry pairs accumulated in the mailbox */
	unsigned long i;
	int err = 0;
	int ts = 0, tc = 0;	/* total KB and total chunks, for logging */
	CPU_2_BE64_PREP;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	RtlZeroMemory(mailbox->buf, MTHCA_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mthca_icm_first(icm, &iter);
	     !mthca_icm_last(&iter);
	     mthca_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		i = (u32)mthca_icm_addr(&iter) | mthca_icm_size(&iter);
		lg = ffs(i) - 1;
		/* FW requires at least 4K (2^12) alignment */
		if (lg < 12) {
			HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Got FW area not aligned to 4K (%I64x/%lx).\n",
				   (u64) mthca_icm_addr(&iter),
				   mthca_icm_size(&iter)));
			err = -EINVAL;
			goto out;
		}
		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
			/* even slot: virtual address (ICM mappings only) */
			if (virt != -1) {
				pages[nent * 2] = cl_hton64(virt);
				virt += 1Ui64 << lg;
			}
			/* odd slot: physical address with log2 page size
			 * (relative to 4K) encoded in the low bits */
			pages[nent * 2 + 1] = CPU_2_BE64((mthca_icm_addr(&iter) +
							   (i << lg)) | (lg - 12));
			ts += 1 << (lg - 10);	/* accumulate size in KB */
			++tc;

			/* mailbox full (16 bytes per entry pair): flush to FW */
			if (++nent == MTHCA_MAILBOX_SIZE / 16) {
				err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
						CMD_TIME_CLASS_B, status);
				if (err || *status)
					goto out;
				nent = 0;
			}
		}
	}

	/* flush any remaining partial batch */
	if (nent)
		err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
				CMD_TIME_CLASS_B, status);

	switch (op) {
	case CMD_MAP_FA:
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Mapped %d chunks/%d KB for FW.\n", tc, ts));
		break;
	case CMD_MAP_ICM_AUX:
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Mapped %d chunks/%d KB for ICM aux.\n", tc, ts));
		break;
	case CMD_MAP_ICM:
		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped %d chunks/%d KB at %I64x for ICM.\n",
			  tc, ts, (u64) virt - (ts << 10)));
		break;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
691
/* MAP_FA - give FW the memory backing its firmware area (no virt addresses). */
int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
{
	return mthca_map_cmd(dev, CMD_MAP_FA, icm, (u64)-1, status);
}
696
/* UNMAP_FA - reclaim the firmware-area memory previously mapped with MAP_FA. */
int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
}
701
/* RUN_FW - start firmware execution (after MAP_FA has provided its memory). */
int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
}
706
/*
 * QUERY_FW - read firmware version, command-queue depth, catastrophic
 * error buffer location, and (memfree vs. Tavor) firmware memory layout
 * into the device structure.
 */
int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u8 lg;

/* byte offsets of fields in the QUERY_FW output mailbox */
#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38

/* Tavor (non-memfree) only */
#define QUERY_FW_START_OFFSET          0x20
#define QUERY_FW_END_OFFSET            0x28

/* Arbel (memfree) only */
#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
			    CMD_TIME_CLASS_A, status);

	if (err)
		goto out;

	MTHCA_GET(dev->fw_ver,   outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->fw_ver = (dev->fw_ver & 0xffff00000000Ui64) |
		((dev->fw_ver & 0xffff0000Ui64) >> 16) |
		((dev->fw_ver & 0x0000ffffUi64) << 16);

	/* FW reports log2 of the maximum outstanding commands */
	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	dev->cmd.max_cmds = 1 << lg;
	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);	
	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);

	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW version %012I64x, max commands %d\n",
		  (u64) dev->fw_ver, dev->cmd.max_cmds));
	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Catastrophic error buffer at 0x%I64x, size 0x%x\n",
		(u64) dev->catas_err.addr, dev->catas_err.size));


	if (mthca_is_memfree(dev)) {
		MTHCA_GET(dev->fw.arbel.fw_pages,       outbox, QUERY_FW_SIZE_OFFSET);
		MTHCA_GET(dev->fw.arbel.clr_int_base,   outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_arm_base,    outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET);
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("FW size %d KB\n", dev->fw.arbel.fw_pages << 2));

		/*
		 * Arbel page size is always 4 KB; round up number of
		 * system pages needed.
		 */
		dev->fw.arbel.fw_pages =
			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
				(PAGE_SHIFT - 12);

		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Clear int @ %I64x, EQ arm @ %I64x, EQ set CI @ %I64x\n",
			  (u64) dev->fw.arbel.clr_int_base,
			  (u64) dev->fw.arbel.eq_arm_base,
			  (u64) dev->fw.arbel.eq_set_ci_base));
	} else {
		MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
		MTHCA_GET(dev->fw.tavor.fw_end,   outbox, QUERY_FW_END_OFFSET);

		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW size %d KB (start %I64x, end %I64x)\n",
			  (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
			  (u64) dev->fw.tavor.fw_start,
			  (u64) dev->fw.tavor.fw_end));
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
792
/*
 * ENABLE_LAM - enable HCA-attached local memory (DDR) and read back its
 * address range and info byte.  Cross-checks the FW-reported "hidden"
 * flag against the PCI-config-derived MTHCA_FLAG_DDR_HIDDEN.
 */
int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u8 info;
	u32 *outbox;
	int err = 0;

/* byte offsets of fields in the ENABLE_LAM output mailbox */
#define ENABLE_LAM_OUT_SIZE         0x100
#define ENABLE_LAM_START_OFFSET     0x00
#define ENABLE_LAM_END_OFFSET       0x08
#define ENABLE_LAM_INFO_OFFSET      0x13

#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
#define ENABLE_LAM_INFO_ECC_MASK    0x3

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
			    CMD_TIME_CLASS_C, status);

	if (err)
		goto out;

	/* no local memory attached -- caller treats this status specially */
	if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE)
		goto out;

	MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
	MTHCA_GET(dev->ddr_end,   outbox, ENABLE_LAM_END_OFFSET);
	MTHCA_GET(info,           outbox, ENABLE_LAM_INFO_OFFSET);

	/* warn when FW and PCI config disagree on DDR visibility */
	if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) !=
	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory "
			   "is %s hidden; does not match PCI config\n",
			   (info & ENABLE_LAM_INFO_HIDDEN_FLAG)?
			   "" : "not"));
	}
	if (info & ENABLE_LAM_INFO_HIDDEN_FLAG)
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("HCA-attached memory is hidden.\n"));

	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
		  (u64) dev->ddr_start,
		  (u64) dev->ddr_end));

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
845
/*
 * DISABLE_LAM: shut down firmware use of locally-attached memory.
 *
 * NOTE(review): this issues CMD_SYS_DIS rather than a dedicated
 * DISABLE_LAM opcode.  That may be an intentional alias or a
 * copy/paste slip — confirm against the HCA command reference (PRM)
 * and the mthca_cmd.h opcode list before changing it.
 */
int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
{
        return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
}
850
851 int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
852 {
853         struct mthca_mailbox *mailbox;
854         u8 info;
855         u32 *outbox;
856         int err = 0;
857
858 #define QUERY_DDR_OUT_SIZE         0x100
859 #define QUERY_DDR_START_OFFSET     0x00
860 #define QUERY_DDR_END_OFFSET       0x08
861 #define QUERY_DDR_INFO_OFFSET      0x13
862
863 #define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
864 #define QUERY_DDR_INFO_ECC_MASK    0x3
865
866         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
867         if (IS_ERR(mailbox))
868                 return PTR_ERR(mailbox);
869         outbox = mailbox->buf;
870
871         err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
872                             CMD_TIME_CLASS_A, status);
873
874         if (err)
875                 goto out;
876
877         MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET);
878         MTHCA_GET(dev->ddr_end,   outbox, QUERY_DDR_END_OFFSET);
879         MTHCA_GET(info,           outbox, QUERY_DDR_INFO_OFFSET);
880
881         if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) !=
882             !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
883
884                 HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory "
885                            "is %s hidden; does not match PCI config\n",
886                            (info & QUERY_DDR_INFO_HIDDEN_FLAG) ?
887                            "" : "not"));
888         }
889         if (info & QUERY_DDR_INFO_HIDDEN_FLAG)
890                 HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("HCA-attached memory is hidden.\n"));
891
892         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
893                   (int) ((dev->ddr_end - dev->ddr_start) >> 10),
894                   (u64) dev->ddr_start,
895                   (u64) dev->ddr_end));
896
897 out:
898         mthca_free_mailbox(dev, mailbox);
899         return err;
900 }
901
902 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
903                         struct mthca_dev_lim *dev_lim, u8 *status)
904 {
905         struct mthca_mailbox *mailbox;
906         u32 *outbox;
907         u8 field;
908         u16 size;
909         int err;
910
911 #define QUERY_DEV_LIM_OUT_SIZE             0x100
912 #define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET     0x10
913 #define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET      0x11
914 #define QUERY_DEV_LIM_RSVD_QP_OFFSET        0x12
915 #define QUERY_DEV_LIM_MAX_QP_OFFSET         0x13
916 #define QUERY_DEV_LIM_RSVD_SRQ_OFFSET       0x14
917 #define QUERY_DEV_LIM_MAX_SRQ_OFFSET        0x15
918 #define QUERY_DEV_LIM_RSVD_EEC_OFFSET       0x16
919 #define QUERY_DEV_LIM_MAX_EEC_OFFSET        0x17
920 #define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET      0x19
921 #define QUERY_DEV_LIM_RSVD_CQ_OFFSET        0x1a
922 #define QUERY_DEV_LIM_MAX_CQ_OFFSET         0x1b
923 #define QUERY_DEV_LIM_MAX_MPT_OFFSET        0x1d
924 #define QUERY_DEV_LIM_RSVD_EQ_OFFSET        0x1e
925 #define QUERY_DEV_LIM_MAX_EQ_OFFSET         0x1f
926 #define QUERY_DEV_LIM_RSVD_MTT_OFFSET       0x20
927 #define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET     0x21
928 #define QUERY_DEV_LIM_RSVD_MRW_OFFSET       0x22
929 #define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET    0x23
930 #define QUERY_DEV_LIM_MAX_AV_OFFSET         0x27
931 #define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET     0x29
932 #define QUERY_DEV_LIM_MAX_RES_QP_OFFSET     0x2b
933 #define QUERY_DEV_LIM_MAX_RDMA_OFFSET       0x2f
934 #define QUERY_DEV_LIM_RSZ_SRQ_OFFSET        0x33
935 #define QUERY_DEV_LIM_ACK_DELAY_OFFSET      0x35
936 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
937 #define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
938 #define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
939 #define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
940 #define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
941 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
942 #define QUERY_DEV_LIM_UAR_SZ_OFFSET         0x49
943 #define QUERY_DEV_LIM_PAGE_SZ_OFFSET        0x4b
944 #define QUERY_DEV_LIM_MAX_SG_OFFSET         0x51
945 #define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET    0x52
946 #define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET      0x55
947 #define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56
948 #define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET     0x61
949 #define QUERY_DEV_LIM_RSVD_MCG_OFFSET       0x62
950 #define QUERY_DEV_LIM_MAX_MCG_OFFSET        0x63
951 #define QUERY_DEV_LIM_RSVD_PD_OFFSET        0x64
952 #define QUERY_DEV_LIM_MAX_PD_OFFSET         0x65
953 #define QUERY_DEV_LIM_RSVD_RDD_OFFSET       0x66
954 #define QUERY_DEV_LIM_MAX_RDD_OFFSET        0x67
955 #define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET   0x80
956 #define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET   0x82
957 #define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET  0x84
958 #define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET  0x86
959 #define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET   0x88
960 #define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET   0x8a
961 #define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET   0x8c
962 #define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET   0x8e
963 #define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET   0x90
964 #define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET   0x92
965 #define QUERY_DEV_LIM_PBL_SZ_OFFSET         0x96
966 #define QUERY_DEV_LIM_BMME_FLAGS_OFFSET     0x97
967 #define QUERY_DEV_LIM_RSVD_LKEY_OFFSET      0x98
968 #define QUERY_DEV_LIM_LAMR_OFFSET           0x9f
969 #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET     0xa0
970
971         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
972         if (IS_ERR(mailbox))
973                 return PTR_ERR(mailbox);
974         outbox = mailbox->buf;
975
976         err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
977                             CMD_TIME_CLASS_A, status);
978
979         if (err)
980                 goto out;
981
982         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
983         dev_lim->reserved_qps = 1 << (field & 0xf);
984         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
985         dev_lim->max_qps = 1 << (field & 0x1f);
986         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET);
987         dev_lim->reserved_srqs = 1 << (field >> 4);
988         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET);
989         dev_lim->max_srqs = 1 << (field & 0x1f);
990         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET);
991         dev_lim->reserved_eecs = 1 << (field & 0xf);
992         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET);
993         dev_lim->max_eecs = 1 << (field & 0x1f);
994         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET);
995         dev_lim->max_cq_sz = 1 << field;
996         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET);
997         dev_lim->reserved_cqs = 1 << (field & 0xf);
998         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET);
999         dev_lim->max_cqs = 1 << (field & 0x1f);
1000         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET);
1001         dev_lim->max_mpts = 1 << (field & 0x3f);
1002         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET);
1003         dev_lim->reserved_eqs = 1 << (field & 0xf);
1004         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET);
1005         dev_lim->max_eqs = 1 << (field & 0x7);
1006         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
1007         dev_lim->reserved_mtts = 1 << (field >> 4);
1008         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
1009         dev_lim->max_mrw_sz = 1 << field;
1010         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET);
1011         dev_lim->reserved_mrws = 1 << (field & 0xf);
1012         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET);
1013         dev_lim->max_mtt_seg = 1 << (field & 0x3f);
1014         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET);
1015         dev_lim->max_requester_per_qp = 1 << (field & 0x3f);
1016         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET);
1017         dev_lim->max_responder_per_qp = 1 << (field & 0x3f);
1018         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET);
1019         dev_lim->max_rdma_global = 1 << (field & 0x3f);
1020         MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET);
1021         dev_lim->local_ca_ack_delay = field & 0x1f;
1022         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET);
1023         dev_lim->max_mtu        = field >> 4;
1024         dev_lim->max_port_width = field & 0xf;
1025         MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET);
1026         dev_lim->max_vl    = field >> 4;
1027         dev_lim->num_ports = field & 0xf;
1028         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
1029         dev_lim->max_gids = 1 << (field & 0xf);
1030         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
1031         dev_lim->max_pkeys = 1 << (field & 0xf);
1032         MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
1033         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET);
1034         dev_lim->reserved_uars = field >> 4;
1035         MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET);
1036         dev_lim->uar_size = 1 << ((field & 0x3f) + 20);
1037         MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET);
1038         dev_lim->min_page_sz = 1 << field;
1039         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET);
1040         dev_lim->max_sg = field;
1041
1042         MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET);
1043         dev_lim->max_desc_sz = size;
1044
1045         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET);
1046         dev_lim->max_qp_per_mcg = 1 << field;
1047         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET);
1048         dev_lim->reserved_mgms = field & 0xf;
1049         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET);
1050         dev_lim->max_mcgs = 1 << field;
1051         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET);
1052         dev_lim->reserved_pds = field >> 4;
1053         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET);
1054         dev_lim->max_pds = 1 << (field & 0x3f);
1055         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET);
1056         dev_lim->reserved_rdds = field >> 4;
1057         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET);
1058         dev_lim->max_rdds = 1 << (field & 0x3f);
1059
1060         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET);
1061         dev_lim->eec_entry_sz = size;
1062         MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET);
1063         dev_lim->qpc_entry_sz = size;
1064         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET);
1065         dev_lim->eeec_entry_sz = size;
1066         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET);
1067         dev_lim->eqpc_entry_sz = size;
1068         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET);
1069         dev_lim->eqc_entry_sz = size;
1070         MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET);
1071         dev_lim->cqc_entry_sz = size;
1072         MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET);
1073         dev_lim->srq_entry_sz = size;
1074         MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
1075         dev_lim->uar_scratch_entry_sz = size;
1076
1077         if (mthca_is_memfree(dev)) {
1078                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1079                 dev_lim->max_srq_sz = 1 << field;
1080                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1081                 dev_lim->max_qp_sz = 1 << field;
1082                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
1083                 dev_lim->hca.arbel.resize_srq = field & 1;
1084                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
1085                 dev_lim->max_sg = min(field, dev_lim->max_sg);
1086                 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
1087                 dev_lim->max_desc_sz = min((int)size, dev_lim->max_desc_sz);            
1088                 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
1089                 dev_lim->mpt_entry_sz = size;
1090                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
1091                 dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f);
1092                 MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox,
1093                           QUERY_DEV_LIM_BMME_FLAGS_OFFSET);
1094                 MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox,
1095                           QUERY_DEV_LIM_RSVD_LKEY_OFFSET);
1096                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET);
1097                 dev_lim->hca.arbel.lam_required = field & 1;
1098                 MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox,
1099                           QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET);
1100
1101                 if (dev_lim->hca.arbel.bmme_flags & 1){
1102                         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Base MM extensions: yes "
1103                                   "(flags %d, max PBL %d, rsvd L_Key %08x)\n",
1104                                   dev_lim->hca.arbel.bmme_flags,
1105                                   dev_lim->hca.arbel.max_pbl_sz,
1106                                   dev_lim->hca.arbel.reserved_lkey));
1107                 }else{
1108                         HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Base MM extensions: no\n"));
1109                 }
1110
1111                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max ICM size %I64d MB\n",
1112                           (u64) dev_lim->hca.arbel.max_icm_sz >> 20));
1113         } 
1114         else {
1115                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1116                 dev_lim->max_srq_sz = (1 << field) - 1;
1117                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1118                 dev_lim->max_qp_sz = (1 << field) - 1;
1119                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
1120                 dev_lim->hca.tavor.max_avs = 1I64 << (field & 0x3f);
1121                 dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
1122         }
1123
1124         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1125                   dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz));
1126         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1127                   dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz));
1128         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1129                   dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz));
1130         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max EQs: %d, reserved EQs: %d, entry size: %d\n",
1131                   dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz));
1132         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("reserved MPTs: %d, reserved MTTs: %d\n",
1133                   dev_lim->reserved_mrws, dev_lim->reserved_mtts));
1134         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1135                   dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars));
1136         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QP/MCG: %d, reserved MGMs: %d\n",
1137                   dev_lim->max_pds, dev_lim->reserved_mgms));
1138         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1139                   dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz));
1140
1141         HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Flags: %08x\n", dev_lim->flags));
1142
1143 out:
1144         mthca_free_mailbox(dev, mailbox);
1145         return err;
1146 }
1147
1148 static void get_board_id(u8 *vsd, char *board_id)
1149 {
1150         int i;
1151
1152 #define VSD_OFFSET_SIG1         0x00
1153 #define VSD_OFFSET_SIG2         0xde
1154 #define VSD_OFFSET_MLX_BOARD_ID 0xd0
1155 #define VSD_OFFSET_TS_BOARD_ID  0x20
1156
1157 #define VSD_SIGNATURE_TOPSPIN   0x5ad
1158
1159         RtlZeroMemory(board_id, MTHCA_BOARD_ID_LEN);
1160
1161         if (cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG1)) == VSD_SIGNATURE_TOPSPIN &&
1162             cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG2)) == VSD_SIGNATURE_TOPSPIN) {
1163                 strlcpy(board_id, (const char *)(vsd + VSD_OFFSET_TS_BOARD_ID), MTHCA_BOARD_ID_LEN);
1164         } else {
1165                 /*
1166                  * The board ID is a string but the firmware byte
1167                  * swaps each 4-byte word before passing it back to
1168                  * us.  Therefore we need to swab it before printing.
1169                  */
1170                 for (i = 0; i < 4; ++i)
1171                         ((u32 *) board_id)[i] =
1172                                 _byteswap_ulong(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1173         }
1174 }
1175
1176 int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1177                         struct mthca_adapter *adapter, u8 *status)
1178 {
1179         struct mthca_mailbox *mailbox;
1180         u32 *outbox;
1181         int err;
1182
1183 #define QUERY_ADAPTER_OUT_SIZE             0x100
1184 #define QUERY_ADAPTER_VENDOR_ID_OFFSET     0x00
1185 #define QUERY_ADAPTER_DEVICE_ID_OFFSET     0x04
1186 #define QUERY_ADAPTER_REVISION_ID_OFFSET   0x08
1187 #define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
1188 #define QUERY_ADAPTER_VSD_OFFSET           0x20
1189
1190         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1191         if (IS_ERR(mailbox))
1192                 return PTR_ERR(mailbox);
1193         outbox = mailbox->buf;
1194
1195         err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
1196                             CMD_TIME_CLASS_A, status);
1197
1198         if (err)
1199                 goto out;
1200
1201         MTHCA_GET(adapter->vendor_id, outbox,   QUERY_ADAPTER_VENDOR_ID_OFFSET);
1202         MTHCA_GET(adapter->device_id, outbox,   QUERY_ADAPTER_DEVICE_ID_OFFSET);
1203         MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
1204         MTHCA_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
1205
1206         get_board_id((u8*)outbox + QUERY_ADAPTER_VSD_OFFSET,
1207                      adapter->board_id);
1208
1209 out:
1210         mthca_free_mailbox(dev, mailbox);
1211         return err;
1212 }
1213
/*
 * INIT_HCA: build the INIT_HCA input mailbox describing where each
 * context area lives (QPC/EEC/SRQC/CQC/EQC/RDB, multicast tables, TPT
 * and UAR attributes) and issue the command to bring the HCA up.
 * Returns the mthca_cmd() result; the FW status byte is written to
 * *status.
 */
int mthca_INIT_HCA(struct mthca_dev *dev,
                   struct mthca_init_hca_param *param,
                   u8 *status)
{
        struct mthca_mailbox *mailbox;
        __be32 *inbox;
        int err;

#define INIT_HCA_IN_SIZE                 0x200
#define INIT_HCA_FLAGS_OFFSET            0x014
#define INIT_HCA_QPC_OFFSET              0x020
#define  INIT_HCA_QPC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x10)
#define  INIT_HCA_LOG_QP_OFFSET          (INIT_HCA_QPC_OFFSET + 0x17)
#define  INIT_HCA_EEC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x20)
#define  INIT_HCA_LOG_EEC_OFFSET         (INIT_HCA_QPC_OFFSET + 0x27)
#define  INIT_HCA_SRQC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x28)
#define  INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
#define  INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
#define  INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
#define  INIT_HCA_EQPC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
#define  INIT_HCA_EEEC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
#define  INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
#define  INIT_HCA_LOG_EQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x67)
#define  INIT_HCA_RDB_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_UDAV_OFFSET             0x0b0
#define  INIT_HCA_UDAV_LKEY_OFFSET       (INIT_HCA_UDAV_OFFSET + 0x0)
#define  INIT_HCA_UDAV_PD_OFFSET         (INIT_HCA_UDAV_OFFSET + 0x4)
#define INIT_HCA_MCAST_OFFSET            0x0c0
#define  INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
#define  INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define  INIT_HCA_MC_HASH_SZ_OFFSET      (INIT_HCA_MCAST_OFFSET + 0x16)
#define  INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET              0x0f0
#define  INIT_HCA_MPT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x00)
#define  INIT_HCA_MTT_SEG_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x09)
#define  INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
#define  INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_UAR_OFFSET              0x120
#define  INIT_HCA_UAR_BASE_OFFSET        (INIT_HCA_UAR_OFFSET + 0x00)
#define  INIT_HCA_UARC_SZ_OFFSET         (INIT_HCA_UAR_OFFSET + 0x09)
#define  INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
#define  INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
#define  INIT_HCA_UAR_CTX_BASE_OFFSET    (INIT_HCA_UAR_OFFSET + 0x18)

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        inbox = mailbox->buf;

        RtlZeroMemory(inbox, INIT_HCA_IN_SIZE);

        /* Flags bit 1 advertises the host byte order to the HCA:
           clear for little-endian hosts, set for big-endian hosts. */
#if defined(__LITTLE_ENDIAN)
        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cl_hton32(1 << 1);
#elif defined(__BIG_ENDIAN)
        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1 << 1);
#else
#error Host endianness not defined
#endif
        /* Check port for UD address vector: */
        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1);

        /* We leave wqe_quota, responder_exu, etc as 0 (default) */

        /* QPC/EEC/CQC/EQC/RDB attributes */

        MTHCA_PUT(inbox, param->qpc_base,     INIT_HCA_QPC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_qps,  INIT_HCA_LOG_QP_OFFSET);
        MTHCA_PUT(inbox, param->eec_base,     INIT_HCA_EEC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET);
        MTHCA_PUT(inbox, param->srqc_base,    INIT_HCA_SRQC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
        MTHCA_PUT(inbox, param->cqc_base,     INIT_HCA_CQC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_cqs,  INIT_HCA_LOG_CQ_OFFSET);
        MTHCA_PUT(inbox, param->eqpc_base,    INIT_HCA_EQPC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->eeec_base,    INIT_HCA_EEEC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->eqc_base,     INIT_HCA_EQC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_eqs,  INIT_HCA_LOG_EQ_OFFSET);
        MTHCA_PUT(inbox, param->rdb_base,     INIT_HCA_RDB_BASE_OFFSET);

        /* UD AV attributes */

        /* multicast attributes */

        MTHCA_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
        MTHCA_PUT(inbox, param->mc_hash_sz,      INIT_HCA_MC_HASH_SZ_OFFSET);
        MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);

        /* TPT attributes */

        MTHCA_PUT(inbox, param->mpt_base,   INIT_HCA_MPT_BASE_OFFSET);
        /* MTT segment size is only meaningful for non-memfree (Tavor) HCAs. */
        if (!mthca_is_memfree(dev))
                MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
        MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
        MTHCA_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);

        /* UAR attributes */
        {
                /* UAR page size is encoded as log2(page size) - 12. */
                u8 uar_page_sz = PAGE_SHIFT - 12;
                MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
        }

        MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);

        /* UAR context parameters only apply to memfree (Arbel) HCAs. */
        if (mthca_is_memfree(dev)) {
                MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
                MTHCA_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);
                MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
        }

        err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);

        mthca_free_mailbox(dev, mailbox);
        return err;
}
1330
1331 int mthca_INIT_IB(struct mthca_dev *dev,
1332                   struct mthca_init_ib_param *param,
1333                   int port, u8 *status)
1334 {
1335         struct mthca_mailbox *mailbox;
1336         u32 *inbox;
1337         int err;
1338         u32 flags;
1339
1340 #define INIT_IB_IN_SIZE                                         56
1341 #define INIT_IB_FLAGS_OFFSET                    0x00
1342 #define INIT_IB_FLAG_SIG                                        (1 << 18)
1343 #define INIT_IB_FLAG_NG                                         (1 << 17)
1344 #define INIT_IB_FLAG_G0                                         (1 << 16)
1345 #define INIT_IB_VL_SHIFT                                        4
1346 #define INIT_IB_PORT_WIDTH_SHIFT        8
1347 #define INIT_IB_MTU_SHIFT                               12
1348 #define INIT_IB_MAX_GID_OFFSET                  0x06
1349 #define INIT_IB_MAX_PKEY_OFFSET         0x0a
1350 #define INIT_IB_GUID0_OFFSET                    0x10
1351 #define INIT_IB_NODE_GUID_OFFSET        0x18
1352 #define INIT_IB_SI_GUID_OFFSET                  0x20
1353
1354         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1355         if (IS_ERR(mailbox))
1356                 return PTR_ERR(mailbox);
1357         inbox = mailbox->buf;
1358
1359         RtlZeroMemory(inbox, INIT_IB_IN_SIZE);
1360
1361         flags = 0;
1362         flags |= param->set_guid0     ? INIT_IB_FLAG_G0  : 0;
1363         flags |= param->set_node_guid ? INIT_IB_FLAG_NG  : 0;
1364         flags |= param->set_si_guid   ? INIT_IB_FLAG_SIG : 0;
1365         flags |= param->vl_cap << INIT_IB_VL_SHIFT;
1366         flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
1367         flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
1368         MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
1369
1370         MTHCA_PUT(inbox, param->gid_cap,   INIT_IB_MAX_GID_OFFSET);
1371         MTHCA_PUT(inbox, param->pkey_cap,  INIT_IB_MAX_PKEY_OFFSET);
1372         MTHCA_PUT(inbox, param->guid0,     INIT_IB_GUID0_OFFSET);
1373         MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
1374         MTHCA_PUT(inbox, param->si_guid,   INIT_IB_SI_GUID_OFFSET);
1375
1376         err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
1377                         CMD_TIME_CLASS_A, status);
1378
1379         mthca_free_mailbox(dev, mailbox);
1380         return err;
1381 }
1382
/*
 * CLOSE_IB: shut down the given IB port (port number goes in the
 * input modifier; HZ is used as the command timeout).
 */
int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status)
{
        return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status);
}
1387
/*
 * CLOSE_HCA: shut the HCA down.  A non-zero 'panic' is passed through
 * as the op modifier (presumably selecting an ungraceful close — TODO
 * confirm against the PRM).
 */
int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
{
        return mthca_cmd(dev, 0, 0, (u8)panic, CMD_CLOSE_HCA, HZ, status);
}
1392
1393 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1394                  int port, u8 *status)
1395 {
1396         struct mthca_mailbox *mailbox;
1397         u32 *inbox;
1398         int err;
1399         u32 flags = 0;
1400
1401 #define SET_IB_IN_SIZE         0x40
1402 #define SET_IB_FLAGS_OFFSET    0x00
1403 #define SET_IB_FLAG_SIG        (1 << 18)
1404 #define SET_IB_FLAG_RQK        (1 <<  0)
1405 #define SET_IB_CAP_MASK_OFFSET 0x04
1406 #define SET_IB_SI_GUID_OFFSET  0x08
1407
1408         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1409         if (IS_ERR(mailbox))
1410                 return PTR_ERR(mailbox);
1411         inbox = mailbox->buf;
1412
1413         RtlZeroMemory(inbox, SET_IB_IN_SIZE);
1414
1415         flags |= param->set_si_guid     ? SET_IB_FLAG_SIG : 0;
1416         flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0;
1417         MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET);
1418
1419         MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
1420         MTHCA_PUT(inbox, param->si_guid,  SET_IB_SI_GUID_OFFSET);
1421
1422         err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
1423                         CMD_TIME_CLASS_B, status);
1424
1425         mthca_free_mailbox(dev, mailbox);
1426         return err;
1427 }
1428
/*
 * MAP_ICM: map all chunks of an ICM allocation into the device's ICM
 * virtual space starting at 'virt' (delegates mailbox construction to
 * the shared mthca_map_cmd() helper).
 */
int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
{
        return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
}
1433
1434 int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
1435 {
1436         struct mthca_mailbox *mailbox;
1437         __be64 *inbox;
1438         int err;
1439
1440         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1441         if (IS_ERR(mailbox))
1442                 return PTR_ERR(mailbox);
1443         inbox = mailbox->buf;
1444
1445         inbox[0] = cl_hton64(virt);
1446         inbox[1] = cl_hton64(dma_addr);
1447
1448         err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
1449                         CMD_TIME_CLASS_B, status);
1450
1451         mthca_free_mailbox(dev, mailbox);
1452
1453         if (!err)
1454                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped page at %I64x to %I64x for ICM.\n",
1455                           (u64) dma_addr, (u64) virt));
1456
1457         return err;
1458 }
1459
/*
 * UNMAP_ICM: unmap 'page_count' pages of device ICM space starting at
 * virtual address 'virt' (virt goes in the input parameter, the count
 * in the input modifier).
 */
int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
{
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Unmapping %d pages at %I64x from ICM.\n",
                  page_count, (u64) virt));

        return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
}
1467
/*
 * MAP_ICM_AUX: map the auxiliary ICM area.  The (u64)-1 virtual
 * address tells mthca_map_cmd() not to emit per-chunk virtual
 * addresses — the aux area has no caller-visible ICM address.
 */
int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
{
        return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, (u64)-1, status);
}
1472
/*
 * UNMAP_ICM_AUX: release the auxiliary ICM area; no parameters beyond
 * the opcode are needed.
 */
int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
{
        return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
}
1477
1478 int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
1479                        u8 *status)
1480 {
1481         int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
1482                                 CMD_TIME_CLASS_A, status);
1483
1484         if (ret || status)
1485                 return ret;
1486
1487         /*
1488          * Arbel page size is always 4 KB; round up number of system
1489          * pages needed.
1490          */
1491         *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
1492         *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
1493
1494         return 0;
1495 }
1496
/*
 * SW2HW_MPT: hand ownership of the MPT entry at 'mpt_index' to the
 * hardware; the mailbox contains the MPT entry contents.
 */
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
                    int mpt_index, u8 *status)
{
        return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
                         CMD_TIME_CLASS_B, status);
}
1503
1504 int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1505                     int mpt_index, u8 *status)
1506 {
1507         return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
1508                              (u8)!mailbox, CMD_HW2SW_MPT,
1509                              CMD_TIME_CLASS_B, status);
1510 }
1511
1512 int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1513                     int num_mtt, u8 *status)
1514 {
1515         return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
1516                          CMD_TIME_CLASS_B, status);
1517 }
1518
1519 int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
1520 {
1521         return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status);
1522 }
1523
1524 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
1525                  int eq_num, u8 *status)
1526 {
1527         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("%s mask %016I64x for eqn %d\n",
1528                   unmap ? "Clearing" : "Setting",
1529                   (u64) event_mask, eq_num));
1530         return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
1531                          0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
1532 }
1533
1534 int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1535                    int eq_num, u8 *status)
1536 {
1537         return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
1538                          CMD_TIME_CLASS_A, status);
1539 }
1540
1541 int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1542                    int eq_num, u8 *status)
1543 {
1544         return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
1545                              CMD_HW2SW_EQ,
1546                              CMD_TIME_CLASS_A, status);
1547 }
1548
1549 int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1550                    int cq_num, u8 *status)
1551 {
1552         return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
1553                         CMD_TIME_CLASS_A, status);
1554 }
1555
1556 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1557                    int cq_num, u8 *status)
1558 {
1559         return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
1560                              CMD_HW2SW_CQ,
1561                              CMD_TIME_CLASS_A, status);
1562 }
1563
1564 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1565                     int srq_num, u8 *status)
1566 {
1567         return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
1568                         CMD_TIME_CLASS_A, status);
1569 }
1570
1571 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1572                     int srq_num, u8 *status)
1573 {
1574         return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
1575                              CMD_HW2SW_SRQ,
1576                              CMD_TIME_CLASS_A, status);
1577 }
1578
1579 int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
1580                     struct mthca_mailbox *mailbox, u8 *status)
1581 {
1582         return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
1583                              CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
1584 }
1585
1586 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
1587 {
1588         return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
1589                          CMD_TIME_CLASS_B, status);
1590 }
1591
/*
 * Perform a QP/EE state transition.
 *
 * trans   - MTHCA_TRANS_* index (see enum below); mapped to the
 *           firmware opcode through op[].
 * num     - QP (or EE context) number; is_ee sets bit 24 of the
 *           input modifier to select EE rather than QP.
 * mailbox - context mailbox.  Required for every transition except
 *           any->reset, where it is optional and used only for the
 *           debug dump of the context returned by firmware.
 * optmask - optional-parameter mask, OR-ed into the input modifier on
 *           the non-reset path.  NOTE(review): the
 *           UNREFERENCED_PARAMETER(optmask) below is stale — optmask
 *           IS referenced in the final mthca_cmd call.
 */
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
		    int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
		    u8 *status)
{
	/* Transition indices; must stay in sync with op[] below. */
	enum {
		MTHCA_TRANS_INVALID = 0,
		MTHCA_TRANS_RST2INIT,
		MTHCA_TRANS_INIT2INIT,
		MTHCA_TRANS_INIT2RTR,
		MTHCA_TRANS_RTR2RTS,
		MTHCA_TRANS_RTS2RTS,
		MTHCA_TRANS_SQERR2RTS,
		MTHCA_TRANS_ANY2ERR,
		MTHCA_TRANS_RTS2SQD,
		MTHCA_TRANS_SQD2SQD,
		MTHCA_TRANS_SQD2RTS,
		MTHCA_TRANS_ANY2RST,
	};
	/* Firmware opcode for each MTHCA_TRANS_* index. */
	static const u16 op[] = {
		0,			/* MTHCA_TRANS_INVALID */
		CMD_RST2INIT_QPEE,	/* MTHCA_TRANS_RST2INIT */
		CMD_INIT2INIT_QPEE,	/* MTHCA_TRANS_INIT2INIT */
		CMD_INIT2RTR_QPEE,	/* MTHCA_TRANS_INIT2RTR */
		CMD_RTR2RTS_QPEE,	/* MTHCA_TRANS_RTR2RTS */
		CMD_RTS2RTS_QPEE,	/* MTHCA_TRANS_RTS2RTS */
		CMD_SQERR2RTS_QPEE,	/* MTHCA_TRANS_SQERR2RTS */
		CMD_2ERR_QPEE,		/* MTHCA_TRANS_ANY2ERR */
		CMD_RTS2SQD_QPEE,	/* MTHCA_TRANS_RTS2SQD */
		CMD_SQD2SQD_QPEE,	/* MTHCA_TRANS_SQD2SQD */
		CMD_SQD2RTS_QPEE,	/* MTHCA_TRANS_SQD2RTS */
		CMD_ERR2RST_QPEE	/* MTHCA_TRANS_ANY2RST */
	};
	u8 op_mod = 0;
	int my_mailbox = 0;	/* set when we allocated the mailbox here */
	int err;

	UNREFERENCED_PARAMETER(optmask);

	/* Reject out-of-range transition indices. */
	if (trans < 0 || trans >= ARRAY_SIZE(op))
		return -EINVAL;

	if (trans == MTHCA_TRANS_ANY2RST) {
		op_mod = 3;	/* don't write outbox, any->reset */

		/* For debugging */
		if (!mailbox) {
			/* Best effort only: if allocation fails we fall
			 * back to op_mod 3 with no outbox. */
			mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
			if (!IS_ERR(mailbox)) {
				my_mailbox = 1;
				op_mod     = 2; /* write outbox, any->reset */
			} else
				mailbox = NULL;
		}
	} else {
		{ // debug print: dump the context about to be sent to firmware
			int i;
			HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n"));
			HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  opt param mask: %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf)));
			/* Dump dwords 2..63 (context starts after the
			 * 8-byte opt-param-mask header), 4 per line. */
			for (i = 2; i < 0x100 / 4; i=i+4) {
				HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  [%02x] %08x %08x %08x %08x\n",i-2,
							cl_ntoh32(((__be32 *) mailbox->buf)[i ]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 3])));
			}
		}
	}

	if (trans == MTHCA_TRANS_ANY2RST) {
		/* any->reset: context (if requested) comes back in the outbox. */
		err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
				    (!!is_ee << 24) | num, op_mod,
				    op[trans], CMD_TIME_CLASS_C, status);

		if (mailbox) { // debug print: dump the context returned by firmware
			int i;
			HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n"));
			for (i = 2; i < 0x100 / 4; i=i+4) {
				HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  [%02x] %08x %08x %08x %08x\n",i-2,
							cl_ntoh32(((__be32 *) mailbox->buf)[i ]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 3])));
			}
		}
	} else
		/* All other transitions: context is sent in the inbox. */
		err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
				op_mod, op[trans], CMD_TIME_CLASS_C, status);

	/* Free the mailbox only if we allocated it above. */
	if (my_mailbox)
		mthca_free_mailbox(dev, mailbox);

	return err;
}
1685
1686 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
1687                    struct mthca_mailbox *mailbox, u8 *status)
1688 {
1689         return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
1690                              CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
1691 }
1692
1693 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
1694                           u8 *status)
1695 {
1696         u8 op_mod;
1697
1698         switch (type) {
1699         case IB_QPT_QP0:
1700                 op_mod = 0;
1701                 break;
1702         case IB_QPT_QP1:
1703                 op_mod = 1;
1704                 break;
1705         case IB_QPT_RAW_IPV6:
1706                 op_mod = 2;
1707                 break;
1708         case IB_QPT_RAW_ETHER:
1709                 op_mod = 3;
1710                 break;
1711         default:
1712                 return -EINVAL;
1713         }
1714
1715         return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
1716                          CMD_TIME_CLASS_B, status);
1717 }
1718
1719 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1720                   int port, struct _ib_wc *in_wc, struct _ib_grh *in_grh,
1721                   void *in_mad, void *response_mad, u8 *status)
1722 {
1723         struct mthca_mailbox *inmailbox, *outmailbox;
1724         u8 *inbox;
1725         int err;
1726         u32 in_modifier = port;
1727         u8 op_modifier = 0;
1728
1729
1730 #define MAD_IFC_BOX_SIZE      0x400
1731 #define MAD_IFC_MY_QPN_OFFSET 0x100
1732 #define MAD_IFC_RQPN_OFFSET   0x108
1733 #define MAD_IFC_SL_OFFSET     0x10c
1734 #define MAD_IFC_G_PATH_OFFSET 0x10d
1735 #define MAD_IFC_RLID_OFFSET   0x10e
1736 #define MAD_IFC_PKEY_OFFSET   0x112
1737 #define MAD_IFC_GRH_OFFSET    0x140
1738
1739         inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1740         if (IS_ERR(inmailbox))
1741                 return PTR_ERR(inmailbox);
1742         inbox = inmailbox->buf;
1743
1744         outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1745         if (IS_ERR(outmailbox)) {
1746                 mthca_free_mailbox(dev, inmailbox);
1747                 return PTR_ERR(outmailbox);
1748         }
1749
1750         memcpy(inbox, in_mad, 256);
1751
1752         /*
1753          * Key check traps can't be generated unless we have in_wc to
1754          * tell us where to send the trap.
1755          */
1756         if (ignore_mkey || !in_wc)
1757                 op_modifier |= 0x1;
1758         if (ignore_bkey || !in_wc)
1759                 op_modifier |= 0x2;
1760
1761         if (in_wc) {
1762                 u8 val;
1763
1764                 memset(inbox + 256, 0, 256);
1765
1766
1767                 MTHCA_PUT(inbox, 0, MAD_IFC_MY_QPN_OFFSET);
1768                 MTHCA_PUT(inbox, cl_ntoh32(in_wc->recv.ud.remote_qp), MAD_IFC_RQPN_OFFSET);
1769                 val = in_wc->recv.ud.remote_sl << 4;
1770                 MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET);
1771
1772                 val = in_wc->recv.ud.path_bits |
1773                         (in_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID ? 0x80 : 0);
1774                 MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET)
1775
1776                 MTHCA_PUT(inbox, cl_ntoh16(in_wc->recv.ud.remote_lid), MAD_IFC_RLID_OFFSET);
1777                 MTHCA_PUT(inbox, in_wc->recv.ud.pkey_index, MAD_IFC_PKEY_OFFSET);
1778
1779                 if (in_grh)
1780                         memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
1781
1782                 op_modifier |= 0x4;
1783                 
1784                 in_modifier |= cl_ntoh16(in_wc->recv.ud.remote_lid) << 16;
1785
1786         }
1787
1788         err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
1789                             in_modifier, op_modifier,
1790                             CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
1791
1792         if (!err && !*status)
1793                 memcpy(response_mad, outmailbox->buf, 256);
1794
1795         mthca_free_mailbox(dev, inmailbox);
1796         mthca_free_mailbox(dev, outmailbox);
1797         return err;
1798 }
1799
1800 int mthca_READ_MGM(struct mthca_dev *dev, int index,
1801                    struct mthca_mailbox *mailbox, u8 *status)
1802 {
1803         return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
1804                              CMD_READ_MGM, CMD_TIME_CLASS_A, status);
1805 }
1806
1807 int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
1808                     struct mthca_mailbox *mailbox, u8 *status)
1809 {
1810         return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
1811                          CMD_TIME_CLASS_A, status);
1812 }
1813
1814 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1815                     u16 *hash, u8 *status)
1816 {
1817         u64 imm;
1818         int err;
1819
1820         err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
1821                             CMD_TIME_CLASS_A, status);
1822
1823         *hash = (u16)imm;
1824         return err;
1825 }
1826
1827 int mthca_NOP(struct mthca_dev *dev, u8 *status)
1828 {
1829         return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, 100000, status);     /* 100 msecs */
1830 }