/* [MTHCA] Fix reserved MTTs calculation on mem-free HCAs (mlnx 2958) */
/* hw/mthca/kernel/mthca_cmd.c */
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  * $Id$
34  */
35
36 #include <ib_mad.h>
37
38 #include "mthca_dev.h"
39 #if defined(EVENT_TRACING)
40 #ifdef offsetof
41 #undef offsetof
42 #endif
43 #include "mthca_cmd.tmh"
44 #endif
45 #include "mthca_config_reg.h"
46 #include "mthca_cmd.h"
47 #include "mthca_memfree.h"
48
/* Token written to the HCR for commands issued in polling mode (no event). */
#define CMD_POLL_TOKEN 0xffff

/* Byte offsets of fields within the HCR (Host Command Register) block,
 * plus bit positions inside the 32-bit word at offset 0x18. */
enum {
	HCR_IN_PARAM_OFFSET    = 0x00,
	HCR_IN_MODIFIER_OFFSET = 0x08,
	HCR_OUT_PARAM_OFFSET   = 0x0c,
	HCR_TOKEN_OFFSET       = 0x14,
	HCR_STATUS_OFFSET      = 0x18,

	HCR_OPMOD_SHIFT        = 12,	/* opcode modifier position in word 6 */
	HCA_E_BIT              = 22,	/* request event-based completion */
	HCR_GO_BIT             = 23	/* set by SW, cleared by FW when done */
};

/* Firmware command opcodes (see the Mellanox PRM). */
enum {
	/* initialization and general commands */
	CMD_SYS_EN          = 0x1,
	CMD_SYS_DIS         = 0x2,
	CMD_MAP_FA          = 0xfff,
	CMD_UNMAP_FA        = 0xffe,
	CMD_RUN_FW          = 0xff6,
	CMD_MOD_STAT_CFG    = 0x34,
	CMD_QUERY_DEV_LIM   = 0x3,
	CMD_QUERY_FW        = 0x4,
	CMD_ENABLE_LAM      = 0xff8,
	CMD_DISABLE_LAM     = 0xff7,
	CMD_QUERY_DDR       = 0x5,
	CMD_QUERY_ADAPTER   = 0x6,
	CMD_INIT_HCA        = 0x7,
	CMD_CLOSE_HCA       = 0x8,
	CMD_INIT_IB         = 0x9,
	CMD_CLOSE_IB        = 0xa,
	CMD_QUERY_HCA       = 0xb,
	CMD_SET_IB          = 0xc,
	CMD_ACCESS_DDR      = 0x2e,
	CMD_MAP_ICM         = 0xffa,
	CMD_UNMAP_ICM       = 0xff9,
	CMD_MAP_ICM_AUX     = 0xffc,
	CMD_UNMAP_ICM_AUX   = 0xffb,
	CMD_SET_ICM_SIZE    = 0xffd,

	/* TPT commands */
	CMD_SW2HW_MPT       = 0xd,
	CMD_QUERY_MPT       = 0xe,
	CMD_HW2SW_MPT       = 0xf,
	CMD_READ_MTT        = 0x10,
	CMD_WRITE_MTT       = 0x11,
	CMD_SYNC_TPT        = 0x2f,

	/* EQ commands */
	CMD_MAP_EQ          = 0x12,
	CMD_SW2HW_EQ        = 0x13,
	CMD_HW2SW_EQ        = 0x14,
	CMD_QUERY_EQ        = 0x15,

	/* CQ commands */
	CMD_SW2HW_CQ        = 0x16,
	CMD_HW2SW_CQ        = 0x17,
	CMD_QUERY_CQ        = 0x18,
	CMD_RESIZE_CQ       = 0x2c,

	/* SRQ commands */
	CMD_SW2HW_SRQ       = 0x35,
	CMD_HW2SW_SRQ       = 0x36,
	CMD_QUERY_SRQ       = 0x37,
	CMD_ARM_SRQ         = 0x40,

	/* QP/EE commands */
	CMD_RST2INIT_QPEE   = 0x19,
	CMD_INIT2RTR_QPEE   = 0x1a,
	CMD_RTR2RTS_QPEE    = 0x1b,
	CMD_RTS2RTS_QPEE    = 0x1c,
	CMD_SQERR2RTS_QPEE  = 0x1d,
	CMD_2ERR_QPEE       = 0x1e,
	CMD_RTS2SQD_QPEE    = 0x1f,
	CMD_SQD2SQD_QPEE    = 0x38,
	CMD_SQD2RTS_QPEE    = 0x20,
	CMD_ERR2RST_QPEE    = 0x21,
	CMD_QUERY_QPEE      = 0x22,
	CMD_INIT2INIT_QPEE  = 0x2d,
	CMD_SUSPEND_QPEE    = 0x32,
	CMD_UNSUSPEND_QPEE  = 0x33,
	/* special QPs and management commands */
	CMD_CONF_SPECIAL_QP = 0x23,
	CMD_MAD_IFC         = 0x24,

	/* multicast commands */
	CMD_READ_MGM        = 0x25,
	CMD_WRITE_MGM       = 0x26,
	CMD_MGID_HASH       = 0x27,

	/* miscellaneous commands */
	CMD_DIAG_RPRT       = 0x30,
	CMD_NOP             = 0x31,

	/* debug commands */
	CMD_QUERY_DEBUG_MSG = 0x2a,
	CMD_SET_DEBUG_MSG   = 0x2b,
};
148
/*
 * According to Mellanox code, FW may be starved and never complete
 * commands.  So we can't use strict timeouts described in PRM -- we
 * just arbitrarily select 60 seconds for now.
 */
#define CMD_POLL_N_TRIES		60

/* Per-command-class timeouts (all set to the same 60 s; see note above). */
enum {
	CMD_TIME_CLASS_A = 60 * HZ,
	CMD_TIME_CLASS_B = 60 * HZ,
	CMD_TIME_CLASS_C = 60 * HZ
};

/* Overall budget for waiting on the HCR GO bit before posting a command. */
enum {
	GO_BIT_TIMEOUT = 10 * HZ
};

#define GO_BIT_N_TRIES		5
#define GO_BIT_STALL_TIMEOUT		((GO_BIT_TIMEOUT/HZ)/GO_BIT_N_TRIES)		/* usecs */

/* State for one in-flight event-mode command; lives in dev->cmd.context[]. */
struct mthca_cmd_context {
	KEVENT	event;		/* signalled by mthca_cmd_event() on completion */
	int               result;	/* 0 once the completion event arrived */
	int               next;		/* free-list link (index), -1 terminates */
	u64               out_param;	/* immediate output returned by FW */
	u16               token;	/* matches completions to this slot */
	u8                status;	/* FW completion status byte */
};
177
178 static inline int go_bit(struct mthca_dev *dev)
179 {
180         return readl(dev->hcr + HCR_STATUS_OFFSET) &
181                 _byteswap_ulong(1 << HCR_GO_BIT);
182 }
183
184 /* 
185 *       Function: performs busy-wait loop, while polling GO bit
186 *       Return: 0 when GO bit was extinguished in time 
187 */
188 static int poll_go_bit(struct mthca_dev *dev)
189 {
190         int i=0; /* init must be here !*/
191         
192         if (!go_bit(dev)) 
193                 return 0;
194
195         for (; i<GO_BIT_N_TRIES; i++) {
196                 /* Nope, stall for a little bit and try again. */
197                 KeStallExecutionProcessor( GO_BIT_STALL_TIMEOUT );
198                 if (!go_bit(dev))
199                         return 0;
200         }               
201         
202         return 1;
203 }
204
/* 
* Function: put thread on hold, while polling GO bit
* Return: 0 when GO bit was extinguished in time 
* Note: the function makes c. CMD_POLL_N_TRIES polls
*
* The thread sleeps (KeDelayExecutionThread) between samples instead of
* busy-waiting; timeout_usecs bounds the total wall-clock wait.
*/
static int wait_go_bit(struct mthca_dev *dev, unsigned long timeout_usecs)
{
#ifdef USE_FAIR_GO_BIT_POLLING	
//
// the algorithm polls 'go bit'  N_POLL_TRIES times with a polling interval,
// increasing from 0 to MAX_POLL_INTERVAL with step of POLL_INTERVAL_DELTA
//
// The values of the constants below were chosen arbitrarily.
// They require eventual tuning, for which reason the algorithm is disabled for now.
//
// NOTE: KeDelayExecutionThread takes relative time in negative 100-ns units,
// so the constants below are negative and "greater" means a SHORTER delay.

		int i = 0;
#define POLL_INTERVAL_DELTA		5 *(-10)	// 5 usec
#define MAX_POLL_INTERVAL			200 *(-10)	// 200 usec
#define N_POLL_TRIES				40
#endif
	u64 start, end;
	LARGE_INTEGER  interval;

	/* fast path: firmware already released the HCR */
	if (!go_bit(dev))	return 0;

	interval.QuadPart = 0;
	start = cl_get_time_stamp();
	end = start + timeout_usecs;
	while (go_bit(dev) && (cl_get_time_stamp() < end)) {
		/* interval == 0 yields the CPU without a fixed sleep */
		KeDelayExecutionThread( KernelMode, FALSE, &interval );
#ifdef USE_FAIR_GO_BIT_POLLING	
		/* every N_POLL_TRIES polls, lengthen the sleep (more negative)
		 * until it reaches MAX_POLL_INTERVAL */
		if (++i >= N_POLL_TRIES) {
			if ( (__int64)interval.QuadPart > (__int64)MAX_POLL_INTERVAL)
				interval.QuadPart += POLL_INTERVAL_DELTA;
			i = 0;
		}
#endif		
	}

	/* one final sample: the bit may have dropped just after the loop */
	if (!go_bit(dev))	return 0;
	return 1;	
}
247
248
/*
 * Post one command to the HCR.
 *
 * in_param/out_param - immediate values or DMA addresses of mailboxes
 * in_modifier        - command-specific 32-bit modifier
 * op_modifier/op     - opcode modifier and opcode
 * token              - echoed back in the completion (event mode)
 * event              - nonzero to request event-based completion (E bit)
 *
 * Returns 0 on success, -EAGAIN if the GO bit would not clear in time.
 * Serialized by dev->cmd.hcr_mutex.
 */
static int mthca_cmd_post(struct mthca_dev *dev,
			  u64 in_param,
			  u64 out_param,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  u16 token,
			  int event)
{
	int err = 0;

	down(&dev->cmd.hcr_mutex);

	/* In event mode another command may still own the HCR; wait for it.
	 * (In polling mode the caller's poll_sem already guarantees exclusivity.) */
	if (event && wait_go_bit(dev,GO_BIT_TIMEOUT)) {
		err = -EAGAIN;
		goto out;
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((u32) cl_hton32((u32)(in_param >> 32)),           (u8 *)dev->hcr + 0 * 4);
	__raw_writel((u32) cl_hton32((u32)(in_param & 0xfffffffful)), (u8 *) dev->hcr + 1 * 4);
	__raw_writel((u32) cl_hton32(in_modifier),              (u8 *)dev->hcr + 2 * 4);
	__raw_writel((u32) cl_hton32((u32)(out_param >> 32)),          (u8 *)dev->hcr + 3 * 4);
	__raw_writel((u32) cl_hton32((u32)(out_param & 0xfffffffful)), (u8 *)dev->hcr + 4 * 4);
	__raw_writel((u32) cl_hton32(token << 16),              (u8 *)dev->hcr + 5 * 4);

	/* __raw_writel may not order writes. */
	wmb();

	/* Writing word 6 with the GO bit set hands the command to firmware;
	 * it must be the last write, hence the barrier above. */
	__raw_writel((u32) cl_hton32((1 << HCR_GO_BIT)                |
					       (event ? (1 << HCA_E_BIT) : 0)   |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op),                       (u8 *)dev->hcr + 6 * 4);

out:
	up(&dev->cmd.hcr_mutex);
	return err;
}
292
293
294 static int mthca_cmd_poll(struct mthca_dev *dev,
295                           u64 in_param,
296                           u64 *out_param,
297                           int out_is_imm,
298                           u32 in_modifier,
299                           u8 op_modifier,
300                           u16 op,
301                           unsigned long timeout,
302                           u8 *status)
303 {
304         int err = 0;
305
306         sem_down(&dev->cmd.poll_sem);
307
308         err = mthca_cmd_post(dev, in_param,
309                              out_param ? *out_param : 0,
310                              in_modifier, op_modifier,
311                              op, CMD_POLL_TOKEN, 0);
312         if (err)
313                 goto out;
314
315         if (wait_go_bit(dev,timeout)) {
316                 err = -EBUSY;
317                 goto out;
318         }
319         
320         if (out_is_imm)
321                 *out_param = 
322                         (u64) cl_ntoh32((__be32)
323                                           __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
324                         (u64) cl_ntoh32((__be32)
325                                           __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
326
327         *status = (u8)(cl_ntoh32((__be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24);
328         if (*status)
329                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n",
330                           op, *status));
331
332 out:
333         sem_up(&dev->cmd.poll_sem);
334         return err;
335 }
336
/*
 * Completion handler for event-mode commands (called from the command EQ
 * path).  Looks up the context slot by token, stores the FW status and
 * immediate output, and wakes the waiting thread.
 */
void mthca_cmd_event(struct mthca_dev *dev,
		     u16 token,
		     u8  status,
		     u64 out_param)
{
	struct mthca_cmd_context *context =
		&dev->cmd.context[token & dev->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->result    = 0;
	context->status    = status;
	context->out_param = out_param;

	/* Bump the token past the mask so a stale completion with the old
	 * token no longer matches this slot. */
	context->token += dev->cmd.token_mask + 1;

	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
	KeSetEvent( &context->event, 0, FALSE );
}
358
/*
 * Issue a command in event mode: take a free context slot, post the
 * command with that slot's token, and sleep on the slot's event until
 * mthca_cmd_event() signals completion (or the timeout expires).
 *
 * Returns 0 on success, -EBUSY on timeout, or the error from
 * mthca_cmd_post().  *status receives the FW completion status.
 * dev->cmd.event_sem bounds the number of concurrent commands to the
 * number of context slots.
 */
static int mthca_cmd_wait(struct mthca_dev *dev,
			  u64 in_param,
			  u64 *out_param,
			  int out_is_imm,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  unsigned long timeout,
			  u8 *status)
{
	int err = 0;
	struct mthca_cmd_context *context;
	SPIN_LOCK_PREP(lh);

	sem_down(&dev->cmd.event_sem);

	/* pop a context slot off the free list */
	spin_lock( &dev->cmd.context_lock, &lh );
	BUG_ON(dev->cmd.free_head < 0);
	context = &dev->cmd.context[dev->cmd.free_head];
	dev->cmd.free_head = context->next;
	spin_unlock( &lh );

	KeClearEvent(	&context->event );
	err = mthca_cmd_post(dev, in_param,
			     out_param ? *out_param : 0,
			     in_modifier, op_modifier,
			     op, context->token, 1);
	if (err) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW,
			("mthca_cmd_wait: Command %02x completed with err %02x\n", op, err));
		goto out;
	}

	{
		NTSTATUS res;
		LARGE_INTEGER  interval;
		/* relative timeout: negative, in 100-ns units */
		interval.QuadPart = (-10)* (__int64)timeout;
		res = KeWaitForSingleObject( &context->event, Executive, KernelMode, FALSE,  &interval );
		if (res != STATUS_SUCCESS) {
			err = -EBUSY;
			HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW,
				("mthca_cmd_wait: Command %02x completed with err %02x\n", op, err));
			goto out;
		}
	}

	*status = context->status;
	if (*status)
		HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n",
			  op, *status));

	if (out_is_imm)
		*out_param = context->out_param;

out:
	/* return the slot to the free list even on error/timeout */
	spin_lock(&dev->cmd.context_lock, &lh);
	context->next = dev->cmd.free_head;
	dev->cmd.free_head = (int)(context - dev->cmd.context);
	spin_unlock(&lh);

	sem_up( &dev->cmd.event_sem );

	return err;
}
423
424 /* Invoke a command with an output mailbox */
425 static int mthca_cmd_box(struct mthca_dev *dev,
426                          u64 in_param,
427                          u64 out_param,
428                          u32 in_modifier,
429                          u8 op_modifier,
430                          u16 op,
431                          unsigned long timeout,
432                          u8 *status)
433 {
434         if (dev->cmd.use_events)
435                 return mthca_cmd_wait(dev, in_param, &out_param, 0,
436                                       in_modifier, op_modifier, op,
437                                       timeout, status);
438         else
439                 return mthca_cmd_poll(dev, in_param, &out_param, 0,
440                                       in_modifier, op_modifier, op,
441                                       timeout, status);
442 }
443
444 /* Invoke a command with no output parameter */
445 static int mthca_cmd(struct mthca_dev *dev,
446                      u64 in_param,
447                      u32 in_modifier,
448                      u8 op_modifier,
449                      u16 op,
450                      unsigned long timeout,
451                      u8 *status)
452 {
453         return mthca_cmd_box(dev, in_param, 0, in_modifier,
454                              op_modifier, op, timeout, status);
455 }
456
457 /*
458  * Invoke a command with an immediate output parameter (and copy the
459  * output into the caller's out_param pointer after the command
460  * executes).
461  */
462 static int mthca_cmd_imm(struct mthca_dev *dev,
463                          u64 in_param,
464                          u64 *out_param,
465                          u32 in_modifier,
466                          u8 op_modifier,
467                          u16 op,
468                          unsigned long timeout,
469                          u8 *status)
470 {
471         if (dev->cmd.use_events)
472                 return mthca_cmd_wait(dev, in_param, out_param, 1,
473                                       in_modifier, op_modifier, op,
474                                       timeout, status);
475         else
476                 return mthca_cmd_poll(dev, in_param, out_param, 1,
477                                       in_modifier, op_modifier, op,
478                                       timeout, status);
479 }
480
481 int mthca_cmd_init(struct mthca_dev *dev)
482 {
483         KeInitializeMutex(&dev->cmd.hcr_mutex, 0);
484         sem_init(&dev->cmd.poll_sem, 1, 1);
485         dev->cmd.use_events = 0;
486
487         dev->hcr = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_HCR) + MTHCA_HCR_BASE,
488                            MTHCA_HCR_SIZE, &dev->hcr_size);
489         if (!dev->hcr) {
490                 HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map command register."));
491                 return -ENOMEM;
492         }
493
494         dev->cmd.pool = pci_pool_create("mthca_cmd", dev,
495                                         MTHCA_MAILBOX_SIZE,
496                                         MTHCA_MAILBOX_SIZE, 0);
497         if (!dev->cmd.pool) {
498                 iounmap(dev->hcr, dev->hcr_size);
499                 return -ENOMEM;
500         }
501
502         return 0;
503 }
504
/* Tear down what mthca_cmd_init() created: the mailbox pool and the
 * HCR mapping. */
void mthca_cmd_cleanup(struct mthca_dev *dev)
{
	pci_pool_destroy(dev->cmd.pool);
	iounmap(dev->hcr, dev->hcr_size);
}
510
511 /*
512  * Switch to using events to issue FW commands (should be called after
513  * event queue to command events has been initialized).
514  */
515 int mthca_cmd_use_events(struct mthca_dev *dev)
516 {
517         int i;
518
519         dev->cmd.context = kmalloc(dev->cmd.max_cmds *
520                                    sizeof (struct mthca_cmd_context),
521                                    GFP_KERNEL);
522         if (!dev->cmd.context)
523                 return -ENOMEM;
524
525         for (i = 0; i < dev->cmd.max_cmds; ++i) {
526                 dev->cmd.context[i].token = (u16)i;
527                 dev->cmd.context[i].next = i + 1;
528         KeInitializeEvent(      &dev->cmd.context[i].event, NotificationEvent , FALSE );
529         }
530
531         dev->cmd.context[dev->cmd.max_cmds - 1].next = -1;
532         dev->cmd.free_head = 0;
533
534         sem_init(&dev->cmd.event_sem, dev->cmd.max_cmds, LONG_MAX);
535         spin_lock_init(&dev->cmd.context_lock);
536
537         for (dev->cmd.token_mask = 1;
538              dev->cmd.token_mask < dev->cmd.max_cmds;
539              dev->cmd.token_mask <<= 1)
540                 ; /* nothing */
541         --dev->cmd.token_mask;
542
543         dev->cmd.use_events = 1;
544         sem_down(&dev->cmd.poll_sem);
545
546         return 0;
547 }
548
549 /*
550  * Switch back to polling (used when shutting down the device)
551  */
552 void mthca_cmd_use_polling(struct mthca_dev *dev)
553 {
554         int i;
555
556         dev->cmd.use_events = 0;
557
558         for (i = 0; i < dev->cmd.max_cmds; ++i)
559                 sem_down(&dev->cmd.event_sem);
560
561         kfree(dev->cmd.context);
562
563         sem_up(&dev->cmd.poll_sem);
564 }
565
566 struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
567                                           unsigned int gfp_mask)
568 {
569         struct mthca_mailbox *mailbox;
570
571         mailbox = kmalloc(sizeof *mailbox, gfp_mask);
572         if (!mailbox)
573                 return ERR_PTR(-ENOMEM);
574
575         mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
576         if (!mailbox->buf) {
577                 kfree(mailbox);
578                 return ERR_PTR(-ENOMEM);
579         }
580
581         return mailbox;
582 }
583
584 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
585 {
586         if (!mailbox)
587                 return;
588
589         pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
590         kfree(mailbox);
591 }
592
593 int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
594 {
595         u64 out;
596         int ret;
597
598         ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status);
599
600         if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR)
601                 HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SYS_EN DDR error: syn=%x, sock=%d, "
602                            "sladdr=%d, SPD source=%s\n",
603                            (int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
604                            (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM"));
605
606         return ret;
607 }
608
/* SYS_DIS: put the HCA back into reset (inverse of SYS_EN). */
int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status);
}
613
/*
 * Common worker for the MAP_FA / MAP_ICM / MAP_ICM_AUX commands: walk an
 * ICM chunk list and hand the firmware (virtual, physical, log2-size)
 * page entries, batching them through a mailbox (16 bytes per entry).
 *
 * virt == -1 means "no virtual address column" (used by MAP_FA).
 * Returns 0 on success; *status carries the FW status of the last command.
 */
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
			 u64 virt, u8 *status)
{
	struct mthca_mailbox *mailbox;
	struct mthca_icm_iter iter;
	__be64 *pages;		/* mailbox viewed as (virt, phys|lg) pairs */
	int lg;			/* log2 of the aligned page size for this chunk */
	int nent = 0;		/* entries accumulated in the mailbox */
	unsigned long i;
	int err = 0;
	int ts = 0, tc = 0;	/* total KB and total chunks, for tracing */
	CPU_2_BE64_PREP;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	RtlZeroMemory(mailbox->buf, MTHCA_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mthca_icm_first(icm, &iter);
	     !mthca_icm_last(&iter);
	     mthca_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		i = (u32)mthca_icm_addr(&iter) | mthca_icm_size(&iter);
		lg = ffs(i) - 1;
		if (lg < 12) {
			/* FW pages must be at least 4 KB aligned/sized */
			HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Got FW area not aligned to 4K (%I64x/%lx).\n",
				   (u64) mthca_icm_addr(&iter),
				   mthca_icm_size(&iter)));
			err = -EINVAL;
			goto out;
		}
		/* emit one entry per lg-sized page within the chunk */
		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cl_hton64(virt);
				virt += 1Ui64 << lg;
			}
			/* low 12 bits of the physical word encode lg-12 */
			pages[nent * 2 + 1] = CPU_2_BE64((mthca_icm_addr(&iter) +
							   (i << lg)) | (lg - 12));
			ts += 1 << (lg - 10);	/* size in KB */
			++tc;

			/* mailbox full (16 bytes/entry): flush to firmware */
			if (++nent == MTHCA_MAILBOX_SIZE / 16) {
				err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
						CMD_TIME_CLASS_B, status);
				if (err || *status)
					goto out;
				nent = 0;
			}
		}
	}

	/* flush any remaining partial batch */
	if (nent)
		err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
				CMD_TIME_CLASS_B, status);

	switch (op) {
	case CMD_MAP_FA:
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Mapped %d chunks/%d KB for FW.\n", tc, ts));
		break;
	case CMD_MAP_ICM_AUX:
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Mapped %d chunks/%d KB for ICM aux.\n", tc, ts));
		break;
	case CMD_MAP_ICM:
		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped %d chunks/%d KB at %I64x for ICM.\n",
			  tc, ts, (u64) virt - (ts << 10)));
		break;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
691
/* MAP_FA: map the firmware area pages (no virtual column, hence virt = -1). */
int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
{
	return mthca_map_cmd(dev, CMD_MAP_FA, icm, (u64)-1, status);
}
696
/* UNMAP_FA: release the firmware area mapping established by MAP_FA. */
int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
}
701
/* RUN_FW: start firmware execution (after the FW area has been mapped). */
int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
}
706
/*
 * QUERY_FW: read firmware version, command-queue depth, catastrophic
 * error buffer location, and (mem-free vs. DDR-based) firmware layout
 * information into the device structure.
 */
int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u8 lg;

/* byte offsets of fields in the QUERY_FW output mailbox */
#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38

#define QUERY_FW_START_OFFSET          0x20
#define QUERY_FW_END_OFFSET            0x28

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
			    CMD_TIME_CLASS_A, status);

	if (err)
		goto out;

	MTHCA_GET(dev->fw_ver,   outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->fw_ver = (dev->fw_ver & 0xffff00000000Ui64) |
		((dev->fw_ver & 0xffff0000Ui64) >> 16) |
		((dev->fw_ver & 0x0000ffffUi64) << 16);

	/* max outstanding commands is reported as a log2 */
	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	dev->cmd.max_cmds = 1 << lg;
	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);	
	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);

	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW version %012I64x, max commands %d\n",
		  (u64) dev->fw_ver, dev->cmd.max_cmds));
	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Catastrophic error buffer at 0x%I64x, size 0x%x\n",
		(u64) dev->catas_err.addr, dev->catas_err.size));


	if (mthca_is_memfree(dev)) {
		/* mem-free (Arbel): FW lives in host memory mapped via MAP_FA */
		MTHCA_GET(dev->fw.arbel.fw_pages,       outbox, QUERY_FW_SIZE_OFFSET);
		MTHCA_GET(dev->fw.arbel.clr_int_base,   outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_arm_base,    outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET);
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("FW size %d KB\n", dev->fw.arbel.fw_pages << 2));

		/*
		 * Arbel page size is always 4 KB; round up number of
		 * system pages needed.
		 */
		dev->fw.arbel.fw_pages =
			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
				(PAGE_SHIFT - 12);

		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Clear int @ %I64x, EQ arm @ %I64x, EQ set CI @ %I64x\n",
			  (u64) dev->fw.arbel.clr_int_base,
			  (u64) dev->fw.arbel.eq_arm_base,
			  (u64) dev->fw.arbel.eq_set_ci_base));
	} else {
		/* DDR-based (Tavor): FW occupies a range of attached memory */
		MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
		MTHCA_GET(dev->fw.tavor.fw_end,   outbox, QUERY_FW_END_OFFSET);

		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW size %d KB (start %I64x, end %I64x)\n",
			  (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
			  (u64) dev->fw.tavor.fw_start,
			  (u64) dev->fw.tavor.fw_end));
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
792
/*
 * ENABLE_LAM: enable Local Attached Memory (HCA DDR) and record its
 * address range in the device structure.  Also sanity-checks that the
 * FW's view of DDR "hidden" mode matches what PCI config reported.
 */
int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u8 info;
	u32 *outbox;
	int err = 0;

/* byte offsets/flags of fields in the ENABLE_LAM output mailbox */
#define ENABLE_LAM_OUT_SIZE         0x100
#define ENABLE_LAM_START_OFFSET     0x00
#define ENABLE_LAM_END_OFFSET       0x08
#define ENABLE_LAM_INFO_OFFSET      0x13

#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
#define ENABLE_LAM_INFO_ECC_MASK    0x3

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
			    CMD_TIME_CLASS_C, status);

	if (err)
		goto out;

	/* "LAM not pre-initialized" is reported to the caller via *status */
	if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE)
		goto out;

	MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
	MTHCA_GET(dev->ddr_end,   outbox, ENABLE_LAM_END_OFFSET);
	MTHCA_GET(info,           outbox, ENABLE_LAM_INFO_OFFSET);

	/* !! normalizes both sides to 0/1 before comparing */
	if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) !=
	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory "
			   "is %s hidden; does not match PCI config\n",
			   (info & ENABLE_LAM_INFO_HIDDEN_FLAG)?
			   "" : "not"));
	}
	if (info & ENABLE_LAM_INFO_HIDDEN_FLAG)
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("HCA-attached memory is hidden.\n"));

	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
		  (u64) dev->ddr_start,
		  (u64) dev->ddr_end));

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
845
/*
 * DISABLE_LAM: shut down HCA-attached local memory.
 *
 * NOTE(review): this issues CMD_SYS_DIS rather than a dedicated
 * disable-LAM opcode; this mirrors the upstream Linux mthca driver
 * and appears intentional.
 */
int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
{
        return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
}
850
/*
 * QUERY_DDR: read the HCA-attached memory (DDR) address range and
 * info byte from the firmware.
 *
 * On success dev->ddr_start and dev->ddr_end are decoded from the
 * command outbox, and the FW's "hidden" flag is checked against the
 * MTHCA_FLAG_DDR_HIDDEN flag derived from PCI config.  Returns 0 or
 * a mailbox/command error; FW status goes to *status.
 */
int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
{
        struct mthca_mailbox *mailbox;
        u8 info;
        u32 *outbox;
        int err = 0;

/* CMD_QUERY_DDR outbox layout. */
#define QUERY_DDR_OUT_SIZE         0x100
#define QUERY_DDR_START_OFFSET     0x00
#define QUERY_DDR_END_OFFSET       0x08
#define QUERY_DDR_INFO_OFFSET      0x13

#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
#define QUERY_DDR_INFO_ECC_MASK    0x3    /* defined but not used below */

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        outbox = mailbox->buf;

        err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
                            CMD_TIME_CLASS_A, status);

        if (err)
                goto out;

        MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET);
        MTHCA_GET(dev->ddr_end,   outbox, QUERY_DDR_END_OFFSET);
        MTHCA_GET(info,           outbox, QUERY_DDR_INFO_OFFSET);

        /* Cross-check FW's "hidden" flag against what PCI config told us. */
        if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) !=
            !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {

                HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory "
                           "is %s hidden; does not match PCI config\n",
                           (info & QUERY_DDR_INFO_HIDDEN_FLAG) ?
                           "" : "not"));
        }
        if (info & QUERY_DDR_INFO_HIDDEN_FLAG)
                HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("HCA-attached memory is hidden.\n"));

        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
                  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
                  (u64) dev->ddr_start,
                  (u64) dev->ddr_end));

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}
901
902 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
903                         struct mthca_dev_lim *dev_lim, u8 *status)
904 {
905         struct mthca_mailbox *mailbox;
906         u32 *outbox;
907         u8 field;
908         u16 size;
909         int err;
910
911 #define QUERY_DEV_LIM_OUT_SIZE             0x100
912 #define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET     0x10
913 #define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET      0x11
914 #define QUERY_DEV_LIM_RSVD_QP_OFFSET        0x12
915 #define QUERY_DEV_LIM_MAX_QP_OFFSET         0x13
916 #define QUERY_DEV_LIM_RSVD_SRQ_OFFSET       0x14
917 #define QUERY_DEV_LIM_MAX_SRQ_OFFSET        0x15
918 #define QUERY_DEV_LIM_RSVD_EEC_OFFSET       0x16
919 #define QUERY_DEV_LIM_MAX_EEC_OFFSET        0x17
920 #define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET      0x19
921 #define QUERY_DEV_LIM_RSVD_CQ_OFFSET        0x1a
922 #define QUERY_DEV_LIM_MAX_CQ_OFFSET         0x1b
923 #define QUERY_DEV_LIM_MAX_MPT_OFFSET        0x1d
924 #define QUERY_DEV_LIM_RSVD_EQ_OFFSET        0x1e
925 #define QUERY_DEV_LIM_MAX_EQ_OFFSET         0x1f
926 #define QUERY_DEV_LIM_RSVD_MTT_OFFSET       0x20
927 #define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET     0x21
928 #define QUERY_DEV_LIM_RSVD_MRW_OFFSET       0x22
929 #define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET    0x23
930 #define QUERY_DEV_LIM_MAX_AV_OFFSET         0x27
931 #define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET     0x29
932 #define QUERY_DEV_LIM_MAX_RES_QP_OFFSET     0x2b
933 #define QUERY_DEV_LIM_MAX_RDMA_OFFSET       0x2f
934 #define QUERY_DEV_LIM_RSZ_SRQ_OFFSET        0x33
935 #define QUERY_DEV_LIM_ACK_DELAY_OFFSET      0x35
936 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
937 #define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
938 #define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
939 #define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
940 #define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
941 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
942 #define QUERY_DEV_LIM_UAR_SZ_OFFSET         0x49
943 #define QUERY_DEV_LIM_PAGE_SZ_OFFSET        0x4b
944 #define QUERY_DEV_LIM_MAX_SG_OFFSET         0x51
945 #define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET    0x52
946 #define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET      0x55
947 #define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56
948 #define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET     0x61
949 #define QUERY_DEV_LIM_RSVD_MCG_OFFSET       0x62
950 #define QUERY_DEV_LIM_MAX_MCG_OFFSET        0x63
951 #define QUERY_DEV_LIM_RSVD_PD_OFFSET        0x64
952 #define QUERY_DEV_LIM_MAX_PD_OFFSET         0x65
953 #define QUERY_DEV_LIM_RSVD_RDD_OFFSET       0x66
954 #define QUERY_DEV_LIM_MAX_RDD_OFFSET        0x67
955 #define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET   0x80
956 #define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET   0x82
957 #define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET  0x84
958 #define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET  0x86
959 #define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET   0x88
960 #define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET   0x8a
961 #define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET   0x8c
962 #define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET   0x8e
963 #define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET   0x90
964 #define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET   0x92
965 #define QUERY_DEV_LIM_PBL_SZ_OFFSET         0x96
966 #define QUERY_DEV_LIM_BMME_FLAGS_OFFSET     0x97
967 #define QUERY_DEV_LIM_RSVD_LKEY_OFFSET      0x98
968 #define QUERY_DEV_LIM_LAMR_OFFSET           0x9f
969 #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET     0xa0
970
971         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
972         if (IS_ERR(mailbox))
973                 return PTR_ERR(mailbox);
974         outbox = mailbox->buf;
975
976         err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
977                             CMD_TIME_CLASS_A, status);
978
979         if (err)
980                 goto out;
981
982         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
983         dev_lim->reserved_qps = 1 << (field & 0xf);
984         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
985         dev_lim->max_qps = 1 << (field & 0x1f);
986         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET);
987         dev_lim->reserved_srqs = 1 << (field >> 4);
988         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET);
989         dev_lim->max_srqs = 1 << (field & 0x1f);
990         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET);
991         dev_lim->reserved_eecs = 1 << (field & 0xf);
992         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET);
993         dev_lim->max_eecs = 1 << (field & 0x1f);
994         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET);
995         dev_lim->max_cq_sz = 1 << field;
996         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET);
997         dev_lim->reserved_cqs = 1 << (field & 0xf);
998         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET);
999         dev_lim->max_cqs = 1 << (field & 0x1f);
1000         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET);
1001         dev_lim->max_mpts = 1 << (field & 0x3f);
1002         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET);
1003         dev_lim->reserved_eqs = 1 << (field & 0xf);
1004         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET);
1005         dev_lim->max_eqs = 1 << (field & 0x7);
1006         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
1007         if (mthca_is_memfree(dev))
1008                 dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
1009                         MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE;
1010         else
1011                 dev_lim->reserved_mtts = 1 << (field >> 4);
1012         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
1013         dev_lim->max_mrw_sz = 1 << field;
1014         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET);
1015         dev_lim->reserved_mrws = 1 << (field & 0xf);
1016         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET);
1017         dev_lim->max_mtt_seg = 1 << (field & 0x3f);
1018         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET);
1019         dev_lim->max_requester_per_qp = 1 << (field & 0x3f);
1020         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET);
1021         dev_lim->max_responder_per_qp = 1 << (field & 0x3f);
1022         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET);
1023         dev_lim->max_rdma_global = 1 << (field & 0x3f);
1024         MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET);
1025         dev_lim->local_ca_ack_delay = field & 0x1f;
1026         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET);
1027         dev_lim->max_mtu        = field >> 4;
1028         dev_lim->max_port_width = field & 0xf;
1029         MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET);
1030         dev_lim->max_vl    = field >> 4;
1031         dev_lim->num_ports = field & 0xf;
1032         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
1033         dev_lim->max_gids = 1 << (field & 0xf);
1034         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
1035         dev_lim->max_pkeys = 1 << (field & 0xf);
1036         MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
1037         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET);
1038         dev_lim->reserved_uars = field >> 4;
1039         MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET);
1040         dev_lim->uar_size = 1 << ((field & 0x3f) + 20);
1041         MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET);
1042         dev_lim->min_page_sz = 1 << field;
1043         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET);
1044         dev_lim->max_sg = field;
1045
1046         MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET);
1047         dev_lim->max_desc_sz = size;
1048
1049         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET);
1050         dev_lim->max_qp_per_mcg = 1 << field;
1051         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET);
1052         dev_lim->reserved_mgms = field & 0xf;
1053         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET);
1054         dev_lim->max_mcgs = 1 << field;
1055         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET);
1056         dev_lim->reserved_pds = field >> 4;
1057         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET);
1058         dev_lim->max_pds = 1 << (field & 0x3f);
1059         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET);
1060         dev_lim->reserved_rdds = field >> 4;
1061         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET);
1062         dev_lim->max_rdds = 1 << (field & 0x3f);
1063
1064         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET);
1065         dev_lim->eec_entry_sz = size;
1066         MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET);
1067         dev_lim->qpc_entry_sz = size;
1068         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET);
1069         dev_lim->eeec_entry_sz = size;
1070         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET);
1071         dev_lim->eqpc_entry_sz = size;
1072         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET);
1073         dev_lim->eqc_entry_sz = size;
1074         MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET);
1075         dev_lim->cqc_entry_sz = size;
1076         MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET);
1077         dev_lim->srq_entry_sz = size;
1078         MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
1079         dev_lim->uar_scratch_entry_sz = size;
1080
1081         if (mthca_is_memfree(dev)) {
1082                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1083                 dev_lim->max_srq_sz = 1 << field;
1084                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1085                 dev_lim->max_qp_sz = 1 << field;
1086                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
1087                 dev_lim->hca.arbel.resize_srq = field & 1;
1088                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
1089                 dev_lim->max_sg = min(field, dev_lim->max_sg);
1090                 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
1091                 dev_lim->max_desc_sz = min((int)size, dev_lim->max_desc_sz);            
1092                 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
1093                 dev_lim->mpt_entry_sz = size;
1094                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
1095                 dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f);
1096                 MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox,
1097                           QUERY_DEV_LIM_BMME_FLAGS_OFFSET);
1098                 MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox,
1099                           QUERY_DEV_LIM_RSVD_LKEY_OFFSET);
1100                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET);
1101                 dev_lim->hca.arbel.lam_required = field & 1;
1102                 MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox,
1103                           QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET);
1104
1105                 if (dev_lim->hca.arbel.bmme_flags & 1){
1106                         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Base MM extensions: yes "
1107                                   "(flags %d, max PBL %d, rsvd L_Key %08x)\n",
1108                                   dev_lim->hca.arbel.bmme_flags,
1109                                   dev_lim->hca.arbel.max_pbl_sz,
1110                                   dev_lim->hca.arbel.reserved_lkey));
1111                 }else{
1112                         HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Base MM extensions: no\n"));
1113                 }
1114
1115                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max ICM size %I64d MB\n",
1116                           (u64) dev_lim->hca.arbel.max_icm_sz >> 20));
1117         } 
1118         else {
1119                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1120                 dev_lim->max_srq_sz = (1 << field) - 1;
1121                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1122                 dev_lim->max_qp_sz = (1 << field) - 1;
1123                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
1124                 dev_lim->hca.tavor.max_avs = 1I64 << (field & 0x3f);
1125                 dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
1126         }
1127
1128         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1129                   dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz));
1130         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1131                   dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz));
1132         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1133                   dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz));
1134         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max EQs: %d, reserved EQs: %d, entry size: %d\n",
1135                   dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz));
1136         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("reserved MPTs: %d, reserved MTTs: %d\n",
1137                   dev_lim->reserved_mrws, dev_lim->reserved_mtts));
1138         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1139                   dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars));
1140         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QP/MCG: %d, reserved MGMs: %d\n",
1141                   dev_lim->max_pds, dev_lim->reserved_mgms));
1142         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1143                   dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz));
1144
1145         HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Flags: %08x\n", dev_lim->flags));
1146
1147 out:
1148         mthca_free_mailbox(dev, mailbox);
1149         return err;
1150 }
1151
1152 static void get_board_id(u8 *vsd, char *board_id)
1153 {
1154         int i;
1155
1156 #define VSD_OFFSET_SIG1         0x00
1157 #define VSD_OFFSET_SIG2         0xde
1158 #define VSD_OFFSET_MLX_BOARD_ID 0xd0
1159 #define VSD_OFFSET_TS_BOARD_ID  0x20
1160
1161 #define VSD_SIGNATURE_TOPSPIN   0x5ad
1162
1163         RtlZeroMemory(board_id, MTHCA_BOARD_ID_LEN);
1164
1165         if (cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG1)) == VSD_SIGNATURE_TOPSPIN &&
1166             cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG2)) == VSD_SIGNATURE_TOPSPIN) {
1167                 strlcpy(board_id, (const char *)(vsd + VSD_OFFSET_TS_BOARD_ID), MTHCA_BOARD_ID_LEN);
1168         } else {
1169                 /*
1170                  * The board ID is a string but the firmware byte
1171                  * swaps each 4-byte word before passing it back to
1172                  * us.  Therefore we need to swab it before printing.
1173                  */
1174                 for (i = 0; i < 4; ++i)
1175                         ((u32 *) board_id)[i] =
1176                                 _byteswap_ulong(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1177         }
1178 }
1179
/*
 * QUERY_ADAPTER: read adapter identification into *adapter --
 * vendor/device/revision IDs, the INTA pin byte, and the board ID
 * string extracted from the VSD section of the outbox.  Returns 0 or
 * a mailbox/command error; FW status goes to *status.
 */
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
                        struct mthca_adapter *adapter, u8 *status)
{
        struct mthca_mailbox *mailbox;
        u32 *outbox;
        int err;

/* CMD_QUERY_ADAPTER outbox layout. */
#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_VENDOR_ID_OFFSET     0x00
#define QUERY_ADAPTER_DEVICE_ID_OFFSET     0x04
#define QUERY_ADAPTER_REVISION_ID_OFFSET   0x08
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        outbox = mailbox->buf;

        err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
                            CMD_TIME_CLASS_A, status);

        if (err)
                goto out;

        MTHCA_GET(adapter->vendor_id, outbox,   QUERY_ADAPTER_VENDOR_ID_OFFSET);
        MTHCA_GET(adapter->device_id, outbox,   QUERY_ADAPTER_DEVICE_ID_OFFSET);
        MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
        MTHCA_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);

        /* Board ID lives in the vendor-specific data (VSD) block. */
        get_board_id((u8*)outbox + QUERY_ADAPTER_VSD_OFFSET,
                     adapter->board_id);

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}
1217
/*
 * INIT_HCA: build the INIT_HCA input mailbox from *param and start
 * the HCA.
 *
 * The inbox is zeroed, then filled in:
 *  - flags word: bit 1 is cleared for little-endian hosts and set
 *    for big-endian hosts; bit 0 enables "check port for UD address
 *    vector";
 *  - QPC/EEC/SRQC/CQC/EQPC/EEEC/EQC/RDB context base addresses and
 *    log2 counts;
 *  - multicast table parameters;
 *  - TPT (MPT/MTT) parameters -- the MTT segment size is written
 *    only in Tavor (non-mem-free) mode;
 *  - UAR parameters: uar_page_sz is log2 of the system page size
 *    relative to 4 KB; the UARC fields are mem-free (Arbel) only.
 *
 * Returns the command result; FW status goes to *status.
 */
int mthca_INIT_HCA(struct mthca_dev *dev,
                   struct mthca_init_hca_param *param,
                   u8 *status)
{
        struct mthca_mailbox *mailbox;
        __be32 *inbox;
        int err;

/* CMD_INIT_HCA inbox layout. */
#define INIT_HCA_IN_SIZE                 0x200
#define INIT_HCA_FLAGS_OFFSET            0x014
#define INIT_HCA_QPC_OFFSET              0x020
#define  INIT_HCA_QPC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x10)
#define  INIT_HCA_LOG_QP_OFFSET          (INIT_HCA_QPC_OFFSET + 0x17)
#define  INIT_HCA_EEC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x20)
#define  INIT_HCA_LOG_EEC_OFFSET         (INIT_HCA_QPC_OFFSET + 0x27)
#define  INIT_HCA_SRQC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x28)
#define  INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
#define  INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
#define  INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
#define  INIT_HCA_EQPC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
#define  INIT_HCA_EEEC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
#define  INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
#define  INIT_HCA_LOG_EQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x67)
#define  INIT_HCA_RDB_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_UDAV_OFFSET             0x0b0
#define  INIT_HCA_UDAV_LKEY_OFFSET       (INIT_HCA_UDAV_OFFSET + 0x0)
#define  INIT_HCA_UDAV_PD_OFFSET         (INIT_HCA_UDAV_OFFSET + 0x4)
#define INIT_HCA_MCAST_OFFSET            0x0c0
#define  INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
#define  INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define  INIT_HCA_MC_HASH_SZ_OFFSET      (INIT_HCA_MCAST_OFFSET + 0x16)
#define  INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET              0x0f0
#define  INIT_HCA_MPT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x00)
#define  INIT_HCA_MTT_SEG_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x09)
#define  INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
#define  INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_UAR_OFFSET              0x120
#define  INIT_HCA_UAR_BASE_OFFSET        (INIT_HCA_UAR_OFFSET + 0x00)
#define  INIT_HCA_UARC_SZ_OFFSET         (INIT_HCA_UAR_OFFSET + 0x09)
#define  INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
#define  INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
#define  INIT_HCA_UAR_CTX_BASE_OFFSET    (INIT_HCA_UAR_OFFSET + 0x18)

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        inbox = mailbox->buf;

        RtlZeroMemory(inbox, INIT_HCA_IN_SIZE);

        /* Flags bit 1: host endianness (clear = little, set = big). */
#if defined(__LITTLE_ENDIAN)
        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cl_hton32(1 << 1);
#elif defined(__BIG_ENDIAN)
        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1 << 1);
#else
#error Host endianness not defined
#endif
        /* Check port for UD address vector: */
        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1);

        /* We leave wqe_quota, responder_exu, etc as 0 (default) */

        /* QPC/EEC/CQC/EQC/RDB attributes */

        MTHCA_PUT(inbox, param->qpc_base,     INIT_HCA_QPC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_qps,  INIT_HCA_LOG_QP_OFFSET);
        MTHCA_PUT(inbox, param->eec_base,     INIT_HCA_EEC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET);
        MTHCA_PUT(inbox, param->srqc_base,    INIT_HCA_SRQC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
        MTHCA_PUT(inbox, param->cqc_base,     INIT_HCA_CQC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_cqs,  INIT_HCA_LOG_CQ_OFFSET);
        MTHCA_PUT(inbox, param->eqpc_base,    INIT_HCA_EQPC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->eeec_base,    INIT_HCA_EEEC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->eqc_base,     INIT_HCA_EQC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_num_eqs,  INIT_HCA_LOG_EQ_OFFSET);
        MTHCA_PUT(inbox, param->rdb_base,     INIT_HCA_RDB_BASE_OFFSET);

        /* UD AV attributes */

        /* multicast attributes */

        MTHCA_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
        MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
        MTHCA_PUT(inbox, param->mc_hash_sz,      INIT_HCA_MC_HASH_SZ_OFFSET);
        MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);

        /* TPT attributes */

        MTHCA_PUT(inbox, param->mpt_base,   INIT_HCA_MPT_BASE_OFFSET);
        /* MTT segment size is a Tavor-only field. */
        if (!mthca_is_memfree(dev))
                MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
        MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
        MTHCA_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);

        /* UAR attributes */
        {
                /* log2 of system page size relative to the 4 KB base. */
                u8 uar_page_sz = PAGE_SHIFT - 12;
                MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
        }

        MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);

        /* UAR context parameters exist only on mem-free (Arbel) HCAs. */
        if (mthca_is_memfree(dev)) {
                MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
                MTHCA_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);
                MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
        }

        err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);

        mthca_free_mailbox(dev, mailbox);
        return err;
}
1334
/*
 * INIT_IB: bring up IB port 'port' with the capabilities in *param.
 *
 * The flags word packs the optional set-GUID bits (G0/NG/SIG) plus
 * vl_cap, port_width and mtu_cap at their respective shifts; GID and
 * P_Key table sizes and the optional GUID values follow at fixed
 * offsets.  Returns the command result; FW status goes to *status.
 */
int mthca_INIT_IB(struct mthca_dev *dev,
                  struct mthca_init_ib_param *param,
                  int port, u8 *status)
{
        struct mthca_mailbox *mailbox;
        u32 *inbox;
        int err;
        u32 flags;

/* CMD_INIT_IB inbox layout. */
#define INIT_IB_IN_SIZE                                         56
#define INIT_IB_FLAGS_OFFSET                    0x00
#define INIT_IB_FLAG_SIG                                        (1 << 18)
#define INIT_IB_FLAG_NG                                         (1 << 17)
#define INIT_IB_FLAG_G0                                         (1 << 16)
#define INIT_IB_VL_SHIFT                                        4
#define INIT_IB_PORT_WIDTH_SHIFT        8
#define INIT_IB_MTU_SHIFT                               12
#define INIT_IB_MAX_GID_OFFSET                  0x06
#define INIT_IB_MAX_PKEY_OFFSET         0x0a
#define INIT_IB_GUID0_OFFSET                    0x10
#define INIT_IB_NODE_GUID_OFFSET        0x18
#define INIT_IB_SI_GUID_OFFSET                  0x20

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        inbox = mailbox->buf;

        RtlZeroMemory(inbox, INIT_IB_IN_SIZE);

        /* Assemble flags: which GUIDs to set, plus port capabilities. */
        flags = 0;
        flags |= param->set_guid0     ? INIT_IB_FLAG_G0  : 0;
        flags |= param->set_node_guid ? INIT_IB_FLAG_NG  : 0;
        flags |= param->set_si_guid   ? INIT_IB_FLAG_SIG : 0;
        flags |= param->vl_cap << INIT_IB_VL_SHIFT;
        flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
        flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
        MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);

        MTHCA_PUT(inbox, param->gid_cap,   INIT_IB_MAX_GID_OFFSET);
        MTHCA_PUT(inbox, param->pkey_cap,  INIT_IB_MAX_PKEY_OFFSET);
        MTHCA_PUT(inbox, param->guid0,     INIT_IB_GUID0_OFFSET);
        MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
        MTHCA_PUT(inbox, param->si_guid,   INIT_IB_SI_GUID_OFFSET);

        err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
                        CMD_TIME_CLASS_A, status);

        mthca_free_mailbox(dev, mailbox);
        return err;
}
1386
/* CLOSE_IB: shut down IB port 'port'; FW status goes to *status. */
int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status)
{
        return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status);
}
1391
/*
 * CLOSE_HCA: stop the HCA.  'panic' is passed to the FW as the op
 * modifier (presumably requesting an ungraceful close -- confirm
 * against the PRM).  FW status goes to *status.
 */
int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
{
        return mthca_cmd(dev, 0, 0, (u8)panic, CMD_CLOSE_HCA, HZ, status);
}
1396
1397 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1398                  int port, u8 *status)
1399 {
1400         struct mthca_mailbox *mailbox;
1401         u32 *inbox;
1402         int err;
1403         u32 flags = 0;
1404
1405 #define SET_IB_IN_SIZE         0x40
1406 #define SET_IB_FLAGS_OFFSET    0x00
1407 #define SET_IB_FLAG_SIG        (1 << 18)
1408 #define SET_IB_FLAG_RQK        (1 <<  0)
1409 #define SET_IB_CAP_MASK_OFFSET 0x04
1410 #define SET_IB_SI_GUID_OFFSET  0x08
1411
1412         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1413         if (IS_ERR(mailbox))
1414                 return PTR_ERR(mailbox);
1415         inbox = mailbox->buf;
1416
1417         RtlZeroMemory(inbox, SET_IB_IN_SIZE);
1418
1419         flags |= param->set_si_guid     ? SET_IB_FLAG_SIG : 0;
1420         flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0;
1421         MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET);
1422
1423         MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
1424         MTHCA_PUT(inbox, param->si_guid,  SET_IB_SI_GUID_OFFSET);
1425
1426         err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
1427                         CMD_TIME_CLASS_B, status);
1428
1429         mthca_free_mailbox(dev, mailbox);
1430         return err;
1431 }
1432
/*
 * MAP_ICM: map the chunks of 'icm' into the ICM space starting at
 * virtual address 'virt' (delegates to the generic map command
 * helper).  FW status goes to *status.
 */
int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
{
        return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
}
1437
1438 int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
1439 {
1440         struct mthca_mailbox *mailbox;
1441         __be64 *inbox;
1442         int err;
1443
1444         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1445         if (IS_ERR(mailbox))
1446                 return PTR_ERR(mailbox);
1447         inbox = mailbox->buf;
1448
1449         inbox[0] = cl_hton64(virt);
1450         inbox[1] = cl_hton64(dma_addr);
1451
1452         err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
1453                         CMD_TIME_CLASS_B, status);
1454
1455         mthca_free_mailbox(dev, mailbox);
1456
1457         if (!err)
1458                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped page at %I64x to %I64x for ICM.\n",
1459                           (u64) dma_addr, (u64) virt));
1460
1461         return err;
1462 }
1463
/*
 * UNMAP_ICM: unmap 'page_count' pages of ICM starting at virtual
 * address 'virt' (passed as input parameter and input modifier,
 * respectively).  FW status goes to *status.
 */
int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
{
        HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Unmapping %d pages at %I64x from ICM.\n",
                  page_count, (u64) virt));

        return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
}
1471
/*
 * MAP_ICM_AUX: map 'icm' as auxiliary ICM.  The (u64)-1 virtual
 * address is a sentinel -- aux ICM has no caller-chosen placement
 * (presumably the FW manages it; confirm against the PRM).
 */
int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
{
        return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, (u64)-1, status);
}
1476
/* UNMAP_ICM_AUX: release all auxiliary ICM; FW status goes to *status. */
int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
{
        return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
}
1481
1482 int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
1483                        u8 *status)
1484 {
1485         int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
1486                                 CMD_TIME_CLASS_A, status);
1487
1488         if (ret || status)
1489                 return ret;
1490
1491         /*
1492          * Arbel page size is always 4 KB; round up number of system
1493          * pages needed.
1494          */
1495         *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
1496         *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
1497
1498         return 0;
1499 }
1500
1501 int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1502                     int mpt_index, u8 *status)
1503 {
1504         return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
1505                          CMD_TIME_CLASS_B, status);
1506 }
1507
1508 int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1509                     int mpt_index, u8 *status)
1510 {
1511         return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
1512                              (u8)!mailbox, CMD_HW2SW_MPT,
1513                              CMD_TIME_CLASS_B, status);
1514 }
1515
1516 int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1517                     int num_mtt, u8 *status)
1518 {
1519         return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
1520                          CMD_TIME_CLASS_B, status);
1521 }
1522
1523 int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
1524 {
1525         return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status);
1526 }
1527
1528 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
1529                  int eq_num, u8 *status)
1530 {
1531         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("%s mask %016I64x for eqn %d\n",
1532                   unmap ? "Clearing" : "Setting",
1533                   (u64) event_mask, eq_num));
1534         return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
1535                          0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
1536 }
1537
1538 int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1539                    int eq_num, u8 *status)
1540 {
1541         return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
1542                          CMD_TIME_CLASS_A, status);
1543 }
1544
1545 int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1546                    int eq_num, u8 *status)
1547 {
1548         return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
1549                              CMD_HW2SW_EQ,
1550                              CMD_TIME_CLASS_A, status);
1551 }
1552
1553 int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1554                    int cq_num, u8 *status)
1555 {
1556         return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
1557                         CMD_TIME_CLASS_A, status);
1558 }
1559
1560 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1561                    int cq_num, u8 *status)
1562 {
1563         return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
1564                              CMD_HW2SW_CQ,
1565                              CMD_TIME_CLASS_A, status);
1566 }
1567
1568 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1569                     int srq_num, u8 *status)
1570 {
1571         return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
1572                         CMD_TIME_CLASS_A, status);
1573 }
1574
1575 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1576                     int srq_num, u8 *status)
1577 {
1578         return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
1579                              CMD_HW2SW_SRQ,
1580                              CMD_TIME_CLASS_A, status);
1581 }
1582
1583 int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
1584                     struct mthca_mailbox *mailbox, u8 *status)
1585 {
1586         return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
1587                              CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
1588 }
1589
1590 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
1591 {
1592         return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
1593                          CMD_TIME_CLASS_B, status);
1594 }
1595
1596 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
1597                     int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
1598                     u8 *status)
1599 {
1600         enum {
1601                 MTHCA_TRANS_INVALID = 0,
1602                 MTHCA_TRANS_RST2INIT,
1603                 MTHCA_TRANS_INIT2INIT,
1604                 MTHCA_TRANS_INIT2RTR,
1605                 MTHCA_TRANS_RTR2RTS,
1606                 MTHCA_TRANS_RTS2RTS,
1607                 MTHCA_TRANS_SQERR2RTS,
1608                 MTHCA_TRANS_ANY2ERR,
1609                 MTHCA_TRANS_RTS2SQD,
1610                 MTHCA_TRANS_SQD2SQD,
1611                 MTHCA_TRANS_SQD2RTS,
1612                 MTHCA_TRANS_ANY2RST,
1613         };
1614         static const u16 op[] = {
1615                 0,                                                                                      /* MTHCA_TRANS_INVALID */
1616                 CMD_RST2INIT_QPEE,              /* MTHCA_TRANS_RST2INIT */
1617                 CMD_INIT2INIT_QPEE,             /* MTHCA_TRANS_INIT2INIT */
1618                 CMD_INIT2RTR_QPEE,              /* MTHCA_TRANS_INIT2RTR */
1619                 CMD_RTR2RTS_QPEE,               /* MTHCA_TRANS_RTR2RTS */
1620                 CMD_RTS2RTS_QPEE,               /* MTHCA_TRANS_RTS2RTS */
1621                 CMD_SQERR2RTS_QPEE,     /* MTHCA_TRANS_SQERR2RTS */
1622                 CMD_2ERR_QPEE,                          /* MTHCA_TRANS_ANY2ERR */
1623                 CMD_RTS2SQD_QPEE,               /* MTHCA_TRANS_RTS2SQD */
1624                 CMD_SQD2SQD_QPEE,               /* MTHCA_TRANS_SQD2SQD */
1625                 CMD_SQD2RTS_QPEE,               /* MTHCA_TRANS_SQD2RTS */
1626                 CMD_ERR2RST_QPEE                        /* MTHCA_TRANS_ANY2RST */
1627         };
1628         u8 op_mod = 0;
1629         int my_mailbox = 0;
1630         int err;
1631
1632         UNREFERENCED_PARAMETER(optmask);
1633
1634         if (trans < 0 || trans >= ARRAY_SIZE(op))
1635                 return -EINVAL;
1636
1637         if (trans == MTHCA_TRANS_ANY2RST) {
1638                 op_mod = 3;     /* don't write outbox, any->reset */
1639
1640                 /* For debugging */
1641                 if (!mailbox) {
1642                         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1643                         if (!IS_ERR(mailbox)) {
1644                                 my_mailbox = 1;
1645                                 op_mod     = 2; /* write outbox, any->reset */
1646                         } else
1647                                 mailbox = NULL;
1648                 }
1649         } else {
1650                 { // debug print
1651                         int i;
1652                         HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n"));
1653                         HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  opt param mask: %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf)));
1654                         for (i = 2; i < 0x100 / 4; i=i+4) {
1655                                 HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  [%02x] %08x %08x %08x %08x\n",i-2,
1656                                                         cl_ntoh32(((__be32 *) mailbox->buf)[i ]),
1657                                                         cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]),
1658                                                         cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]),
1659                                                         cl_ntoh32(((__be32 *) mailbox->buf)[i + 3])));
1660                         }
1661                 }
1662         }
1663
1664         if (trans == MTHCA_TRANS_ANY2RST) {
1665                 err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
1666                                     (!!is_ee << 24) | num, op_mod,
1667                                     op[trans], CMD_TIME_CLASS_C, status);
1668
1669                 if (mailbox) { // debug print
1670                         int i;
1671                         HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n"));
1672                         for (i = 2; i < 0x100 / 4; i=i+4) {
1673                                 HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  [%02x] %08x %08x %08x %08x\n",i-2,
1674                                                         cl_ntoh32(((__be32 *) mailbox->buf)[i ]),
1675                                                         cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]),
1676                                                         cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]),
1677                                                         cl_ntoh32(((__be32 *) mailbox->buf)[i + 3])));
1678                         }
1679                 }
1680         } else
1681                 err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
1682                                 op_mod, op[trans], CMD_TIME_CLASS_C, status);
1683
1684         if (my_mailbox)
1685                 mthca_free_mailbox(dev, mailbox);
1686
1687         return err;
1688 }
1689
1690 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
1691                    struct mthca_mailbox *mailbox, u8 *status)
1692 {
1693         return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
1694                              CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
1695 }
1696
1697 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
1698                           u8 *status)
1699 {
1700         u8 op_mod;
1701
1702         switch (type) {
1703         case IB_QPT_QP0:
1704                 op_mod = 0;
1705                 break;
1706         case IB_QPT_QP1:
1707                 op_mod = 1;
1708                 break;
1709         case IB_QPT_RAW_IPV6:
1710                 op_mod = 2;
1711                 break;
1712         case IB_QPT_RAW_ETHER:
1713                 op_mod = 3;
1714                 break;
1715         default:
1716                 return -EINVAL;
1717         }
1718
1719         return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
1720                          CMD_TIME_CLASS_B, status);
1721 }
1722
1723 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1724                   int port, struct _ib_wc *in_wc, struct _ib_grh *in_grh,
1725                   void *in_mad, void *response_mad, u8 *status)
1726 {
1727         struct mthca_mailbox *inmailbox, *outmailbox;
1728         u8 *inbox;
1729         int err;
1730         u32 in_modifier = port;
1731         u8 op_modifier = 0;
1732
1733
1734 #define MAD_IFC_BOX_SIZE      0x400
1735 #define MAD_IFC_MY_QPN_OFFSET 0x100
1736 #define MAD_IFC_RQPN_OFFSET   0x108
1737 #define MAD_IFC_SL_OFFSET     0x10c
1738 #define MAD_IFC_G_PATH_OFFSET 0x10d
1739 #define MAD_IFC_RLID_OFFSET   0x10e
1740 #define MAD_IFC_PKEY_OFFSET   0x112
1741 #define MAD_IFC_GRH_OFFSET    0x140
1742
1743         inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1744         if (IS_ERR(inmailbox))
1745                 return PTR_ERR(inmailbox);
1746         inbox = inmailbox->buf;
1747
1748         outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1749         if (IS_ERR(outmailbox)) {
1750                 mthca_free_mailbox(dev, inmailbox);
1751                 return PTR_ERR(outmailbox);
1752         }
1753
1754         memcpy(inbox, in_mad, 256);
1755
1756         /*
1757          * Key check traps can't be generated unless we have in_wc to
1758          * tell us where to send the trap.
1759          */
1760         if (ignore_mkey || !in_wc)
1761                 op_modifier |= 0x1;
1762         if (ignore_bkey || !in_wc)
1763                 op_modifier |= 0x2;
1764
1765         if (in_wc) {
1766                 u8 val;
1767
1768                 memset(inbox + 256, 0, 256);
1769
1770
1771                 MTHCA_PUT(inbox, 0, MAD_IFC_MY_QPN_OFFSET);
1772                 MTHCA_PUT(inbox, cl_ntoh32(in_wc->recv.ud.remote_qp), MAD_IFC_RQPN_OFFSET);
1773                 val = in_wc->recv.ud.remote_sl << 4;
1774                 MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET);
1775
1776                 val = in_wc->recv.ud.path_bits |
1777                         (in_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID ? 0x80 : 0);
1778                 MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET)
1779
1780                 MTHCA_PUT(inbox, cl_ntoh16(in_wc->recv.ud.remote_lid), MAD_IFC_RLID_OFFSET);
1781                 MTHCA_PUT(inbox, in_wc->recv.ud.pkey_index, MAD_IFC_PKEY_OFFSET);
1782
1783                 if (in_grh)
1784                         memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
1785
1786                 op_modifier |= 0x4;
1787                 
1788                 in_modifier |= cl_ntoh16(in_wc->recv.ud.remote_lid) << 16;
1789
1790         }
1791
1792         err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
1793                             in_modifier, op_modifier,
1794                             CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
1795
1796         if (!err && !*status)
1797                 memcpy(response_mad, outmailbox->buf, 256);
1798
1799         mthca_free_mailbox(dev, inmailbox);
1800         mthca_free_mailbox(dev, outmailbox);
1801         return err;
1802 }
1803
1804 int mthca_READ_MGM(struct mthca_dev *dev, int index,
1805                    struct mthca_mailbox *mailbox, u8 *status)
1806 {
1807         return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
1808                              CMD_READ_MGM, CMD_TIME_CLASS_A, status);
1809 }
1810
1811 int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
1812                     struct mthca_mailbox *mailbox, u8 *status)
1813 {
1814         return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
1815                          CMD_TIME_CLASS_A, status);
1816 }
1817
1818 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1819                     u16 *hash, u8 *status)
1820 {
1821         u64 imm;
1822         int err;
1823
1824         err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
1825                             CMD_TIME_CLASS_A, status);
1826
1827         *hash = (u16)imm;
1828         return err;
1829 }
1830
1831 int mthca_NOP(struct mthca_dev *dev, u8 *status)
1832 {
1833         return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, 100000, status);     /* 100 msecs */
1834 }