62c156a521228508fa92e9353aed4f020dd14a81
[mirror/winof/.git] / hw / mthca / kernel / mthca_cmd.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  * $Id$
34  */
35
36 #include <ib_mad.h>
37
38 #include "mthca_dev.h"
39 #if defined(EVENT_TRACING)
40 #ifdef offsetof
41 #undef offsetof
42 #endif
43 #include "mthca_cmd.tmh"
44 #endif
45 #include "mthca_config_reg.h"
46 #include "mthca_cmd.h"
47 #include "mthca_memfree.h"
48
49 #define CMD_POLL_TOKEN 0xffff
50
/*
 * Layout of the HCR (HCA Command Register): byte offsets of its fields
 * and bit positions within the final (dispatch) dword.
 */
enum {
	HCR_IN_PARAM_OFFSET    = 0x00,
	HCR_IN_MODIFIER_OFFSET = 0x08,
	HCR_OUT_PARAM_OFFSET   = 0x0c,
	HCR_TOKEN_OFFSET       = 0x14,
	HCR_STATUS_OFFSET      = 0x18,

	HCR_OPMOD_SHIFT        = 12,	/* opcode-modifier position in the dispatch dword */
	HCA_E_BIT              = 22,	/* 'E' bit: request a completion event (event mode) */
	HCR_GO_BIT             = 23	/* GO bit: set while hardware owns the HCR */
};
62
/* Firmware command opcodes, written into the opcode field of the HCR. */
enum {
	/* initialization and general commands */
	CMD_SYS_EN          = 0x1,
	CMD_SYS_DIS         = 0x2,
	CMD_MAP_FA          = 0xfff,
	CMD_UNMAP_FA        = 0xffe,
	CMD_RUN_FW          = 0xff6,
	CMD_MOD_STAT_CFG    = 0x34,
	CMD_QUERY_DEV_LIM   = 0x3,
	CMD_QUERY_FW        = 0x4,
	CMD_ENABLE_LAM      = 0xff8,
	CMD_DISABLE_LAM     = 0xff7,
	CMD_QUERY_DDR       = 0x5,
	CMD_QUERY_ADAPTER   = 0x6,
	CMD_INIT_HCA        = 0x7,
	CMD_CLOSE_HCA       = 0x8,
	CMD_INIT_IB         = 0x9,
	CMD_CLOSE_IB        = 0xa,
	CMD_QUERY_HCA       = 0xb,
	CMD_SET_IB          = 0xc,
	CMD_ACCESS_DDR      = 0x2e,
	CMD_MAP_ICM         = 0xffa,
	CMD_UNMAP_ICM       = 0xff9,
	CMD_MAP_ICM_AUX     = 0xffc,
	CMD_UNMAP_ICM_AUX   = 0xffb,
	CMD_SET_ICM_SIZE    = 0xffd,

	/* TPT (translation/protection table) commands */
	CMD_SW2HW_MPT       = 0xd,
	CMD_QUERY_MPT       = 0xe,
	CMD_HW2SW_MPT       = 0xf,
	CMD_READ_MTT        = 0x10,
	CMD_WRITE_MTT       = 0x11,
	CMD_SYNC_TPT        = 0x2f,

	/* EQ commands */
	CMD_MAP_EQ          = 0x12,
	CMD_SW2HW_EQ        = 0x13,
	CMD_HW2SW_EQ        = 0x14,
	CMD_QUERY_EQ        = 0x15,

	/* CQ commands */
	CMD_SW2HW_CQ        = 0x16,
	CMD_HW2SW_CQ        = 0x17,
	CMD_QUERY_CQ        = 0x18,
	CMD_RESIZE_CQ       = 0x2c,

	/* SRQ commands */
	CMD_SW2HW_SRQ       = 0x35,
	CMD_HW2SW_SRQ       = 0x36,
	CMD_QUERY_SRQ       = 0x37,
	CMD_ARM_SRQ         = 0x40,

	/* QP/EE commands (QP state transitions and queries) */
	CMD_RST2INIT_QPEE   = 0x19,
	CMD_INIT2RTR_QPEE   = 0x1a,
	CMD_RTR2RTS_QPEE    = 0x1b,
	CMD_RTS2RTS_QPEE    = 0x1c,
	CMD_SQERR2RTS_QPEE  = 0x1d,
	CMD_2ERR_QPEE       = 0x1e,
	CMD_RTS2SQD_QPEE    = 0x1f,
	CMD_SQD2SQD_QPEE    = 0x38,
	CMD_SQD2RTS_QPEE    = 0x20,
	CMD_ERR2RST_QPEE    = 0x21,
	CMD_QUERY_QPEE      = 0x22,
	CMD_INIT2INIT_QPEE  = 0x2d,
	CMD_SUSPEND_QPEE    = 0x32,
	CMD_UNSUSPEND_QPEE  = 0x33,
	/* special QPs and management commands */
	CMD_CONF_SPECIAL_QP = 0x23,
	CMD_MAD_IFC         = 0x24,

	/* multicast commands */
	CMD_READ_MGM        = 0x25,
	CMD_WRITE_MGM       = 0x26,
	CMD_MGID_HASH       = 0x27,

	/* miscellaneous commands */
	CMD_DIAG_RPRT       = 0x30,
	CMD_NOP             = 0x31,

	/* debug commands */
	CMD_QUERY_DEBUG_MSG = 0x2a,
	CMD_SET_DEBUG_MSG   = 0x2b,
};
148
/*
 * According to Mellanox code, FW may be starved and never complete
 * commands.  So we can't use strict timeouts described in PRM -- we
 * just arbitrarily select 60 seconds for now.
 */
#define CMD_POLL_N_TRIES		60

/* Per-class command timeouts; all collapsed to 60 s (see note above). */
enum {
	CMD_TIME_CLASS_A = 60 * HZ,
	CMD_TIME_CLASS_B = 60 * HZ,
	CMD_TIME_CLASS_C = 60 * HZ
};
161
/* Total time budget for waiting on the GO bit before giving up. */
enum {
	GO_BIT_TIMEOUT = 10 * HZ
};

/* Busy-wait polling: number of stalls and the stall length per try. */
#define GO_BIT_N_TRIES		5
#define GO_BIT_STALL_TIMEOUT		((GO_BIT_TIMEOUT/HZ)/GO_BIT_N_TRIES)		/* usecs */
168
/* Per-command bookkeeping used in event-driven (interrupt) command mode. */
struct mthca_cmd_context {
	KEVENT	event;			/* signalled by mthca_cmd_event() on completion */
	int               result;	/* set to 0 by mthca_cmd_event() on completion */
	int               next;		/* index of next free context; -1 terminates the free list */
	u64               out_param;	/* immediate output parameter reported by FW */
	u16               token;	/* token matched against the completion event */
	u8                status;	/* FW status byte of the completed command */
};
177
178 static inline int go_bit(struct mthca_dev *dev)
179 {
180         return readl(dev->hcr + HCR_STATUS_OFFSET) &
181                 _byteswap_ulong(1 << HCR_GO_BIT);
182 }
183
184 /* 
185 *       Function: performs busy-wait loop, while polling GO bit
186 *       Return: 0 when GO bit was extinguished in time 
187 */
188 static int poll_go_bit(struct mthca_dev *dev)
189 {
190         int i=0; /* init must be here !*/
191         
192         if (!go_bit(dev)) 
193                 return 0;
194
195         for (; i<GO_BIT_N_TRIES; i++) {
196                 /* Nope, stall for a little bit and try again. */
197                 KeStallExecutionProcessor( GO_BIT_STALL_TIMEOUT );
198                 if (!go_bit(dev))
199                         return 0;
200         }               
201         
202         return 1;
203 }
204
/* 
 * Function: put the thread on hold while polling the GO bit.
 * Return: 0 when the GO bit was extinguished in time.
 * Note: the function makes c. CMD_POLL_N_TRIES polls.
 */
static int wait_go_bit(struct mthca_dev *dev, unsigned long timeout_usecs)
{
#ifdef USE_FAIR_GO_BIT_POLLING	
//
// the algorithm polls 'go bit'  N_POLL_TRIES times with a polling interval,
// increasing from 0 to MAX_POLL_INTERVAL with step of POLL_INTERVAL_DELTA
//
// The values of the constants below were chosen arbitrarily.
// They require eventual tuning, which is why the algorithm is disabled for now.

		int i = 0;
/* KeDelayExecutionThread: negative value = relative time in 100-ns units */
#define POLL_INTERVAL_DELTA		5 *(-10)	// 5 usec
#define MAX_POLL_INTERVAL			200 *(-10)	// 200 usec
#define N_POLL_TRIES				40
#endif
	u64 start, end;
	LARGE_INTEGER  interval;

	if (!go_bit(dev))	return 0;

	/* zero relative delay: effectively yields the processor between polls */
	interval.QuadPart = 0;
	start = cl_get_time_stamp();
	end = start + timeout_usecs;
	while (go_bit(dev) && (cl_get_time_stamp() < end)) {
		KeDelayExecutionThread( KernelMode, FALSE, &interval );
#ifdef USE_FAIR_GO_BIT_POLLING	
		/* every N_POLL_TRIES polls, grow the (negative) delay toward MAX_POLL_INTERVAL */
		if (++i >= N_POLL_TRIES) {
			if ( (__int64)interval.QuadPart > (__int64)MAX_POLL_INTERVAL)
				interval.QuadPart += POLL_INTERVAL_DELTA;
			i = 0;
		}
#endif		
	}

	if (!go_bit(dev))	return 0;
	return 1;	
}
247
248
/*
 * Post a command to the HCR.  Serialized by hcr_mutex.
 * In event mode the GO bit is awaited here before posting; in polling
 * mode the caller serializes via poll_sem and checks the GO bit after
 * posting (see mthca_cmd_poll).
 * Returns 0 on success, -EAGAIN if the GO bit never cleared.
 */
static int mthca_cmd_post(struct mthca_dev *dev,
			  u64 in_param,
			  u64 out_param,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  u16 token,
			  int event)
{
	int err = 0;

	down(&dev->cmd.hcr_mutex);

	if (event && wait_go_bit(dev,GO_BIT_TIMEOUT)) {
		err = -EAGAIN;
		goto out;
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	/* Fill the HCR fields in big-endian, dword by dword. */
	__raw_writel((u32) cl_hton32((u32)(in_param >> 32)),           (u8 *)dev->hcr + 0 * 4);
	__raw_writel((u32) cl_hton32((u32)(in_param & 0xfffffffful)), (u8 *) dev->hcr + 1 * 4);
	__raw_writel((u32) cl_hton32(in_modifier),              (u8 *)dev->hcr + 2 * 4);
	__raw_writel((u32) cl_hton32((u32)(out_param >> 32)),          (u8 *)dev->hcr + 3 * 4);
	__raw_writel((u32) cl_hton32((u32)(out_param & 0xfffffffful)), (u8 *)dev->hcr + 4 * 4);
	__raw_writel((u32) cl_hton32(token << 16),              (u8 *)dev->hcr + 5 * 4);

	/* __raw_writel may not order writes. */
	wmb();

	/* Dispatch dword last: setting the GO bit hands the HCR to hardware. */
	__raw_writel((u32) cl_hton32((1 << HCR_GO_BIT)                |
					       (event ? (1 << HCA_E_BIT) : 0)   |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op),                       (u8 *)dev->hcr + 6 * 4);

out:
	up(&dev->cmd.hcr_mutex);
	return err;
}
292
293
294 static int mthca_cmd_poll(struct mthca_dev *dev,
295                           u64 in_param,
296                           u64 *out_param,
297                           int out_is_imm,
298                           u32 in_modifier,
299                           u8 op_modifier,
300                           u16 op,
301                           unsigned long timeout,
302                           u8 *status)
303 {
304         int err = 0;
305
306         sem_down(&dev->cmd.poll_sem);
307
308         err = mthca_cmd_post(dev, in_param,
309                              out_param ? *out_param : 0,
310                              in_modifier, op_modifier,
311                              op, CMD_POLL_TOKEN, 0);
312         if (err)
313                 goto out;
314
315         if (wait_go_bit(dev,timeout)) {
316                 err = -EBUSY;
317                 goto out;
318         }
319         
320         if (out_is_imm)
321                 *out_param = 
322                         (u64) cl_ntoh32((__be32)
323                                           __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
324                         (u64) cl_ntoh32((__be32)
325                                           __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
326
327         *status = (u8)(cl_ntoh32((__be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24);
328         if (*status)
329                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n",
330                           op, *status));
331
332 out:
333         sem_up(&dev->cmd.poll_sem);
334         return err;
335 }
336
/*
 * Completion handler for event-mode commands (called from EQ handling
 * at <= DISPATCH_LEVEL).  Looks the context up by token, records the
 * result, and wakes the thread blocked in mthca_cmd_wait().
 */
void mthca_cmd_event(struct mthca_dev *dev,
		     u16 token,
		     u8  status,
		     u64 out_param)
{
	struct mthca_cmd_context *context =
		&dev->cmd.context[token & dev->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->result    = 0;
	context->status    = status;
	context->out_param = out_param;

	/* advance the token so a duplicate or stale completion is ignored */
	context->token += dev->cmd.token_mask + 1;

	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
	KeSetEvent( &context->event, 0, FALSE );
}
358
/*
 * Execute a FW command in event mode: take a free command context,
 * post the command with the context's token, and sleep until
 * mthca_cmd_event() signals completion (or the wait times out).
 * event_sem bounds the number of in-flight event-mode commands.
 * Returns 0 or a negative error; FW status is returned in *status.
 */
static int mthca_cmd_wait(struct mthca_dev *dev,
			  u64 in_param,
			  u64 *out_param,
			  int out_is_imm,
			  u32 in_modifier,
			  u8 op_modifier,
			  u16 op,
			  unsigned long timeout,
			  u8 *status)
{
	int err = 0;
	struct mthca_cmd_context *context;
	SPIN_LOCK_PREP(lh);

	sem_down(&dev->cmd.event_sem);

	/* pop a context off the free list */
	spin_lock( &dev->cmd.context_lock, &lh );
	BUG_ON(dev->cmd.free_head < 0);
	context = &dev->cmd.context[dev->cmd.free_head];
	dev->cmd.free_head = context->next;
	spin_unlock( &lh );

	KeClearEvent(	&context->event );
	err = mthca_cmd_post(dev, in_param,
			     out_param ? *out_param : 0,
			     in_modifier, op_modifier,
			     op, context->token, 1);
	if (err)
		goto out;

	{
		NTSTATUS res;
		LARGE_INTEGER  interval;
		/* negative => relative time in 100-ns units; treats 'timeout' as usecs */
		interval.QuadPart = (-10)* (__int64)timeout;
		res = KeWaitForSingleObject( &context->event, Executive, KernelMode, FALSE,  &interval );
		if (res != STATUS_SUCCESS) {
			/* NOTE(review): on timeout the context is recycled below while FW
			 * may still complete it; the token check in mthca_cmd_event()
			 * guards against the stale wake-up. */
			err = -EBUSY;
			goto out;
		}
	}

	*status = context->status;
	if (*status)
		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n",
			  op, *status));

	if (out_is_imm)
		*out_param = context->out_param;

out:
	/* push the context back onto the free list */
	spin_lock(&dev->cmd.context_lock, &lh);
	context->next = dev->cmd.free_head;
	dev->cmd.free_head = (int)(context - dev->cmd.context);
	spin_unlock(&lh);

	sem_up( &dev->cmd.event_sem );

	return err;
}
418
419 /* Invoke a command with an output mailbox */
420 static int mthca_cmd_box(struct mthca_dev *dev,
421                          u64 in_param,
422                          u64 out_param,
423                          u32 in_modifier,
424                          u8 op_modifier,
425                          u16 op,
426                          unsigned long timeout,
427                          u8 *status)
428 {
429         if (dev->cmd.use_events)
430                 return mthca_cmd_wait(dev, in_param, &out_param, 0,
431                                       in_modifier, op_modifier, op,
432                                       timeout, status);
433         else
434                 return mthca_cmd_poll(dev, in_param, &out_param, 0,
435                                       in_modifier, op_modifier, op,
436                                       timeout, status);
437 }
438
/* Invoke a command with no output parameter (mailbox address of 0). */
static int mthca_cmd(struct mthca_dev *dev,
		     u64 in_param,
		     u32 in_modifier,
		     u8 op_modifier,
		     u16 op,
		     unsigned long timeout,
		     u8 *status)
{
	return mthca_cmd_box(dev, in_param, 0, in_modifier,
			     op_modifier, op, timeout, status);
}
451
452 /*
453  * Invoke a command with an immediate output parameter (and copy the
454  * output into the caller's out_param pointer after the command
455  * executes).
456  */
457 static int mthca_cmd_imm(struct mthca_dev *dev,
458                          u64 in_param,
459                          u64 *out_param,
460                          u32 in_modifier,
461                          u8 op_modifier,
462                          u16 op,
463                          unsigned long timeout,
464                          u8 *status)
465 {
466         if (dev->cmd.use_events)
467                 return mthca_cmd_wait(dev, in_param, out_param, 1,
468                                       in_modifier, op_modifier, op,
469                                       timeout, status);
470         else
471                 return mthca_cmd_poll(dev, in_param, out_param, 1,
472                                       in_modifier, op_modifier, op,
473                                       timeout, status);
474 }
475
/*
 * One-time command-interface setup: map the HCR registers and create
 * the DMA pool used for command mailboxes.  The interface starts in
 * polling mode.  Returns 0 on success or -ENOMEM.
 */
int mthca_cmd_init(struct mthca_dev *dev)
{
	KeInitializeMutex(&dev->cmd.hcr_mutex, 0);
	sem_init(&dev->cmd.poll_sem, 1, 1);
	dev->cmd.use_events = 0;

	dev->hcr = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_HCR) + MTHCA_HCR_BASE,
			   MTHCA_HCR_SIZE, &dev->hcr_size);
	if (!dev->hcr) {
		HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_LOW  ,("Couldn't map command register."));
		return -ENOMEM;
	}

	dev->cmd.pool = pci_pool_create("mthca_cmd", dev,
					MTHCA_MAILBOX_SIZE,
					MTHCA_MAILBOX_SIZE, 0);
	if (!dev->cmd.pool) {
		/* undo the HCR mapping on failure */
		iounmap(dev->hcr, dev->hcr_size);
		return -ENOMEM;
	}

	return 0;
}
499
/* Tear down the command interface: destroy the mailbox pool and unmap the HCR. */
void mthca_cmd_cleanup(struct mthca_dev *dev)
{
	pci_pool_destroy(dev->cmd.pool);
	iounmap(dev->hcr, dev->hcr_size);
}
505
506 /*
507  * Switch to using events to issue FW commands (should be called after
508  * event queue to command events has been initialized).
509  */
510 int mthca_cmd_use_events(struct mthca_dev *dev)
511 {
512         int i;
513
514         dev->cmd.context = kmalloc(dev->cmd.max_cmds *
515                                    sizeof (struct mthca_cmd_context),
516                                    GFP_KERNEL);
517         if (!dev->cmd.context)
518                 return -ENOMEM;
519
520         for (i = 0; i < dev->cmd.max_cmds; ++i) {
521                 dev->cmd.context[i].token = (u16)i;
522                 dev->cmd.context[i].next = i + 1;
523         KeInitializeEvent(      &dev->cmd.context[i].event, NotificationEvent , FALSE );
524         }
525
526         dev->cmd.context[dev->cmd.max_cmds - 1].next = -1;
527         dev->cmd.free_head = 0;
528
529         sem_init(&dev->cmd.event_sem, dev->cmd.max_cmds, LONG_MAX);
530         spin_lock_init(&dev->cmd.context_lock);
531
532         for (dev->cmd.token_mask = 1;
533              dev->cmd.token_mask < dev->cmd.max_cmds;
534              dev->cmd.token_mask <<= 1)
535                 ; /* nothing */
536         --dev->cmd.token_mask;
537
538         dev->cmd.use_events = 1;
539         sem_down(&dev->cmd.poll_sem);
540
541         return 0;
542 }
543
544 /*
545  * Switch back to polling (used when shutting down the device)
546  */
547 void mthca_cmd_use_polling(struct mthca_dev *dev)
548 {
549         int i;
550
551         dev->cmd.use_events = 0;
552
553         for (i = 0; i < dev->cmd.max_cmds; ++i)
554                 sem_down(&dev->cmd.event_sem);
555
556         kfree(dev->cmd.context);
557
558         sem_up(&dev->cmd.poll_sem);
559 }
560
561 struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
562                                           unsigned int gfp_mask)
563 {
564         struct mthca_mailbox *mailbox;
565
566         mailbox = kmalloc(sizeof *mailbox, gfp_mask);
567         if (!mailbox)
568                 return ERR_PTR(-ENOMEM);
569
570         mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma);
571         if (!mailbox->buf) {
572                 kfree(mailbox);
573                 return ERR_PTR(-ENOMEM);
574         }
575
576         return mailbox;
577 }
578
579 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
580 {
581         if (!mailbox)
582                 return;
583
584         pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
585         kfree(mailbox);
586 }
587
/*
 * SYS_EN: enable the HCA.  On a DDR memory error, decode and log the
 * diagnostic bits FW returns in the immediate output parameter.
 * NOTE(review): *status (and 'out') are examined even when ret != 0,
 * in which case they may not have been written -- confirm callers
 * tolerate this or initialize 'status' beforehand.
 */
int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
{
	u64 out;
	int ret;

	ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status);

	if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR)
		HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SYS_EN DDR error: syn=%x, sock=%d, "
			   "sladdr=%d, SPD source=%s\n",
			   (int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
			   (int) (out >> 1) & 7, (int) out & 1 ? "NVMEM" : "DIMM"));

	return ret;
}
603
/* SYS_DIS: disable the HCA. */
int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status);
}
608
/*
 * Issue one of the page-mapping commands (MAP_FA / MAP_ICM /
 * MAP_ICM_AUX), feeding FW the physical chunks of 'icm' in
 * mailbox-sized batches of {virtual, physical|log2size} entry pairs.
 * When virt != -1, consecutive ICM virtual addresses are assigned to
 * the pages; virt == -1 means no virtual-address column is written.
 * Returns 0 or a negative error; FW status is returned in *status.
 */
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
			 u64 virt, u8 *status)
{
	struct mthca_mailbox *mailbox;
	struct mthca_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;		/* entries accumulated in the current mailbox */
	unsigned long i;
	int err = 0;
	int ts = 0, tc = 0;	/* total KB / total chunks mapped (for logging) */
	CPU_2_BE64_PREP;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	RtlZeroMemory(mailbox->buf, MTHCA_MAILBOX_SIZE);
	pages = mailbox->buf;

	for (mthca_icm_first(icm, &iter);
	     !mthca_icm_last(&iter);
	     mthca_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 * (Truncating the address to u32 is safe: if its low
		 * dword is zero the address is at least 4 GB aligned,
		 * so the size alone bounds lg correctly.)
		 */
		i = (u32)mthca_icm_addr(&iter) | mthca_icm_size(&iter);
		lg = ffs(i) - 1;
		if (lg < 12) {
			HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Got FW area not aligned to 4K (%I64x/%lx).\n",
				   (u64) mthca_icm_addr(&iter),
				   mthca_icm_size(&iter)));
			err = -EINVAL;
			goto out;
		}
		/* 'i' is reused here as the per-chunk page index */
		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cl_hton64(virt);
				virt += 1Ui64 << lg;
			}
			/* low bits of the physical entry encode log2(page size) - 12 */
			pages[nent * 2 + 1] = CPU_2_BE64((mthca_icm_addr(&iter) +
							   (i << lg)) | (lg - 12));
			ts += 1 << (lg - 10);	/* page size in KB */
			++tc;

			/* flush the mailbox to FW when full (16 bytes per entry) */
			if (++nent == MTHCA_MAILBOX_SIZE / 16) {
				err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
						CMD_TIME_CLASS_B, status);
				if (err || *status)
					goto out;
				nent = 0;
			}
		}
	}

	/* flush any remaining entries */
	if (nent)
		err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
				CMD_TIME_CLASS_B, status);

	switch (op) {
	case CMD_MAP_FA:
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Mapped %d chunks/%d KB for FW.\n", tc, ts));
		break;
	case CMD_MAP_ICM_AUX:
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Mapped %d chunks/%d KB for ICM aux.\n", tc, ts));
		break;
	case CMD_MAP_ICM:
		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped %d chunks/%d KB at %I64x for ICM.\n",
			  tc, ts, (u64) virt - (ts << 10)));
		break;
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
686
/* MAP_FA: map the firmware area pages in 'icm' (virt of -1 => no ICM virtual addresses). */
int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
{
	return mthca_map_cmd(dev, CMD_MAP_FA, icm, (u64)-1, status);
}
691
/* UNMAP_FA: unmap the firmware area. */
int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
}
696
/* RUN_FW: start the firmware (after its area has been mapped). */
int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
{
	return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
}
701
/*
 * QUERY_FW: read firmware version, command-queue depth, catastrophic
 * error buffer location, and (memfree vs. Tavor) firmware memory
 * layout into 'dev'.  Returns 0 or a negative error.
 */
int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u8 lg;

/* byte offsets of fields in the QUERY_FW output mailbox */
#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38

#define QUERY_FW_START_OFFSET          0x20
#define QUERY_FW_END_OFFSET            0x28

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_EQ_ARM_BASE_OFFSET    0x40
#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
			    CMD_TIME_CLASS_A, status);

	if (err)
		goto out;

	MTHCA_GET(dev->fw_ver,   outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->fw_ver = (dev->fw_ver & 0xffff00000000Ui64) |
		((dev->fw_ver & 0xffff0000Ui64) >> 16) |
		((dev->fw_ver & 0x0000ffffUi64) << 16);

	/* max outstanding commands is reported as a log2 */
	MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	dev->cmd.max_cmds = 1 << lg;
	MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);	
	MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);

	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW version %012I64x, max commands %d\n",
		  (u64) dev->fw_ver, dev->cmd.max_cmds));
	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Catastrophic error buffer at 0x%I64x, size 0x%x\n",
		(u64) dev->catas_err.addr, dev->catas_err.size));


	if (mthca_is_memfree(dev)) {
		/* memfree (Arbel) layout: FW size plus doorbell register bases */
		MTHCA_GET(dev->fw.arbel.fw_pages,       outbox, QUERY_FW_SIZE_OFFSET);
		MTHCA_GET(dev->fw.arbel.clr_int_base,   outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_arm_base,    outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
		MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET);
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("FW size %d KB\n", dev->fw.arbel.fw_pages << 2));

		/*
		 * Arbel page size is always 4 KB; round up number of
		 * system pages needed.
		 */
		dev->fw.arbel.fw_pages =
			ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
				(PAGE_SHIFT - 12);

		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Clear int @ %I64x, EQ arm @ %I64x, EQ set CI @ %I64x\n",
			  (u64) dev->fw.arbel.clr_int_base,
			  (u64) dev->fw.arbel.eq_arm_base,
			  (u64) dev->fw.arbel.eq_set_ci_base));
	} else {
		/* Tavor layout: FW occupies a fixed [start, end) range */
		MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET);
		MTHCA_GET(dev->fw.tavor.fw_end,   outbox, QUERY_FW_END_OFFSET);

		HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW size %d KB (start %I64x, end %I64x)\n",
			  (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10),
			  (u64) dev->fw.tavor.fw_start,
			  (u64) dev->fw.tavor.fw_end));
	}

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
787
/*
 * ENABLE_LAM: enable HCA-attached local memory (DDR) and read its
 * address range and info byte into 'dev'.  A status of
 * MTHCA_CMD_STAT_LAM_NOT_PRE (no LAM present) is passed back to the
 * caller without treating it as a driver error.
 */
int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
{
	struct mthca_mailbox *mailbox;
	u8 info;
	u32 *outbox;
	int err = 0;

/* byte offsets of fields in the ENABLE_LAM output mailbox */
#define ENABLE_LAM_OUT_SIZE         0x100
#define ENABLE_LAM_START_OFFSET     0x00
#define ENABLE_LAM_END_OFFSET       0x08
#define ENABLE_LAM_INFO_OFFSET      0x13

#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4)
#define ENABLE_LAM_INFO_ECC_MASK    0x3

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
			    CMD_TIME_CLASS_C, status);

	if (err)
		goto out;

	/* no local memory attached -- nothing more to read */
	if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE)
		goto out;

	MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
	MTHCA_GET(dev->ddr_end,   outbox, ENABLE_LAM_END_OFFSET);
	MTHCA_GET(info,           outbox, ENABLE_LAM_INFO_OFFSET);

	/* warn if FW's hidden-memory flag disagrees with the PCI config view */
	if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) !=
	    !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory "
			   "is %s hidden; does not match PCI config\n",
			   (info & ENABLE_LAM_INFO_HIDDEN_FLAG)?
			   "" : "not"));
	}
	if (info & ENABLE_LAM_INFO_HIDDEN_FLAG)
		HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("HCA-attached memory is hidden.\n"));

	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
		  (int) ((dev->ddr_end - dev->ddr_start) >> 10),
		  (u64) dev->ddr_start,
		  (u64) dev->ddr_end));

out:
	mthca_free_mailbox(dev, mailbox);
	return err;
}
840
841 int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
842 {
843         return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
844 }
845
846 int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
847 {
848         struct mthca_mailbox *mailbox;
849         u8 info;
850         u32 *outbox;
851         int err = 0;
852
853 #define QUERY_DDR_OUT_SIZE         0x100
854 #define QUERY_DDR_START_OFFSET     0x00
855 #define QUERY_DDR_END_OFFSET       0x08
856 #define QUERY_DDR_INFO_OFFSET      0x13
857
858 #define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4)
859 #define QUERY_DDR_INFO_ECC_MASK    0x3
860
861         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
862         if (IS_ERR(mailbox))
863                 return PTR_ERR(mailbox);
864         outbox = mailbox->buf;
865
866         err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
867                             CMD_TIME_CLASS_A, status);
868
869         if (err)
870                 goto out;
871
872         MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET);
873         MTHCA_GET(dev->ddr_end,   outbox, QUERY_DDR_END_OFFSET);
874         MTHCA_GET(info,           outbox, QUERY_DDR_INFO_OFFSET);
875
876         if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) !=
877             !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
878
879                 HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory "
880                            "is %s hidden; does not match PCI config\n",
881                            (info & QUERY_DDR_INFO_HIDDEN_FLAG) ?
882                            "" : "not"));
883         }
884         if (info & QUERY_DDR_INFO_HIDDEN_FLAG)
885                 HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("HCA-attached memory is hidden.\n"));
886
887         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n",
888                   (int) ((dev->ddr_end - dev->ddr_start) >> 10),
889                   (u64) dev->ddr_start,
890                   (u64) dev->ddr_end));
891
892 out:
893         mthca_free_mailbox(dev, mailbox);
894         return err;
895 }
896
897 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
898                         struct mthca_dev_lim *dev_lim, u8 *status)
899 {
900         struct mthca_mailbox *mailbox;
901         u32 *outbox;
902         u8 field;
903         u16 size;
904         int err;
905
906 #define QUERY_DEV_LIM_OUT_SIZE             0x100
907 #define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET     0x10
908 #define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET      0x11
909 #define QUERY_DEV_LIM_RSVD_QP_OFFSET        0x12
910 #define QUERY_DEV_LIM_MAX_QP_OFFSET         0x13
911 #define QUERY_DEV_LIM_RSVD_SRQ_OFFSET       0x14
912 #define QUERY_DEV_LIM_MAX_SRQ_OFFSET        0x15
913 #define QUERY_DEV_LIM_RSVD_EEC_OFFSET       0x16
914 #define QUERY_DEV_LIM_MAX_EEC_OFFSET        0x17
915 #define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET      0x19
916 #define QUERY_DEV_LIM_RSVD_CQ_OFFSET        0x1a
917 #define QUERY_DEV_LIM_MAX_CQ_OFFSET         0x1b
918 #define QUERY_DEV_LIM_MAX_MPT_OFFSET        0x1d
919 #define QUERY_DEV_LIM_RSVD_EQ_OFFSET        0x1e
920 #define QUERY_DEV_LIM_MAX_EQ_OFFSET         0x1f
921 #define QUERY_DEV_LIM_RSVD_MTT_OFFSET       0x20
922 #define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET     0x21
923 #define QUERY_DEV_LIM_RSVD_MRW_OFFSET       0x22
924 #define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET    0x23
925 #define QUERY_DEV_LIM_MAX_AV_OFFSET         0x27
926 #define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET     0x29
927 #define QUERY_DEV_LIM_MAX_RES_QP_OFFSET     0x2b
928 #define QUERY_DEV_LIM_MAX_RDMA_OFFSET       0x2f
929 #define QUERY_DEV_LIM_RSZ_SRQ_OFFSET        0x33
930 #define QUERY_DEV_LIM_ACK_DELAY_OFFSET      0x35
931 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET      0x36
932 #define QUERY_DEV_LIM_VL_PORT_OFFSET        0x37
933 #define QUERY_DEV_LIM_MAX_GID_OFFSET        0x3b
934 #define QUERY_DEV_LIM_MAX_PKEY_OFFSET       0x3f
935 #define QUERY_DEV_LIM_FLAGS_OFFSET          0x44
936 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET       0x48
937 #define QUERY_DEV_LIM_UAR_SZ_OFFSET         0x49
938 #define QUERY_DEV_LIM_PAGE_SZ_OFFSET        0x4b
939 #define QUERY_DEV_LIM_MAX_SG_OFFSET         0x51
940 #define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET    0x52
941 #define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET      0x55
942 #define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56
943 #define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET     0x61
944 #define QUERY_DEV_LIM_RSVD_MCG_OFFSET       0x62
945 #define QUERY_DEV_LIM_MAX_MCG_OFFSET        0x63
946 #define QUERY_DEV_LIM_RSVD_PD_OFFSET        0x64
947 #define QUERY_DEV_LIM_MAX_PD_OFFSET         0x65
948 #define QUERY_DEV_LIM_RSVD_RDD_OFFSET       0x66
949 #define QUERY_DEV_LIM_MAX_RDD_OFFSET        0x67
950 #define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET   0x80
951 #define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET   0x82
952 #define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET  0x84
953 #define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET  0x86
954 #define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET   0x88
955 #define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET   0x8a
956 #define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET   0x8c
957 #define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET   0x8e
958 #define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET   0x90
959 #define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET   0x92
960 #define QUERY_DEV_LIM_PBL_SZ_OFFSET         0x96
961 #define QUERY_DEV_LIM_BMME_FLAGS_OFFSET     0x97
962 #define QUERY_DEV_LIM_RSVD_LKEY_OFFSET      0x98
963 #define QUERY_DEV_LIM_LAMR_OFFSET           0x9f
964 #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET     0xa0
965
966         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
967         if (IS_ERR(mailbox))
968                 return PTR_ERR(mailbox);
969         outbox = mailbox->buf;
970
971         err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
972                             CMD_TIME_CLASS_A, status);
973
974         if (err)
975                 goto out;
976
977         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET);
978         dev_lim->reserved_qps = 1 << (field & 0xf);
979         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET);
980         dev_lim->max_qps = 1 << (field & 0x1f);
981         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET);
982         dev_lim->reserved_srqs = 1 << (field >> 4);
983         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET);
984         dev_lim->max_srqs = 1 << (field & 0x1f);
985         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET);
986         dev_lim->reserved_eecs = 1 << (field & 0xf);
987         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET);
988         dev_lim->max_eecs = 1 << (field & 0x1f);
989         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET);
990         dev_lim->max_cq_sz = 1 << field;
991         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET);
992         dev_lim->reserved_cqs = 1 << (field & 0xf);
993         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET);
994         dev_lim->max_cqs = 1 << (field & 0x1f);
995         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET);
996         dev_lim->max_mpts = 1 << (field & 0x3f);
997         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET);
998         dev_lim->reserved_eqs = 1 << (field & 0xf);
999         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET);
1000         dev_lim->max_eqs = 1 << (field & 0x7);
1001         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
1002         dev_lim->reserved_mtts = 1 << (field >> 4);
1003         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
1004         dev_lim->max_mrw_sz = 1 << field;
1005         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET);
1006         dev_lim->reserved_mrws = 1 << (field & 0xf);
1007         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET);
1008         dev_lim->max_mtt_seg = 1 << (field & 0x3f);
1009         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET);
1010         dev_lim->max_requester_per_qp = 1 << (field & 0x3f);
1011         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET);
1012         dev_lim->max_responder_per_qp = 1 << (field & 0x3f);
1013         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET);
1014         dev_lim->max_rdma_global = 1 << (field & 0x3f);
1015         MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET);
1016         dev_lim->local_ca_ack_delay = field & 0x1f;
1017         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET);
1018         dev_lim->max_mtu        = field >> 4;
1019         dev_lim->max_port_width = field & 0xf;
1020         MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET);
1021         dev_lim->max_vl    = field >> 4;
1022         dev_lim->num_ports = field & 0xf;
1023         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET);
1024         dev_lim->max_gids = 1 << (field & 0xf);
1025         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET);
1026         dev_lim->max_pkeys = 1 << (field & 0xf);
1027         MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
1028         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET);
1029         dev_lim->reserved_uars = field >> 4;
1030         MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET);
1031         dev_lim->uar_size = 1 << ((field & 0x3f) + 20);
1032         MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET);
1033         dev_lim->min_page_sz = 1 << field;
1034         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET);
1035         dev_lim->max_sg = field;
1036
1037         MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET);
1038         dev_lim->max_desc_sz = size;
1039
1040         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET);
1041         dev_lim->max_qp_per_mcg = 1 << field;
1042         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET);
1043         dev_lim->reserved_mgms = field & 0xf;
1044         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET);
1045         dev_lim->max_mcgs = 1 << field;
1046         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET);
1047         dev_lim->reserved_pds = field >> 4;
1048         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET);
1049         dev_lim->max_pds = 1 << (field & 0x3f);
1050         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET);
1051         dev_lim->reserved_rdds = field >> 4;
1052         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET);
1053         dev_lim->max_rdds = 1 << (field & 0x3f);
1054
1055         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET);
1056         dev_lim->eec_entry_sz = size;
1057         MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET);
1058         dev_lim->qpc_entry_sz = size;
1059         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET);
1060         dev_lim->eeec_entry_sz = size;
1061         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET);
1062         dev_lim->eqpc_entry_sz = size;
1063         MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET);
1064         dev_lim->eqc_entry_sz = size;
1065         MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET);
1066         dev_lim->cqc_entry_sz = size;
1067         MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET);
1068         dev_lim->srq_entry_sz = size;
1069         MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET);
1070         dev_lim->uar_scratch_entry_sz = size;
1071
1072         if (mthca_is_memfree(dev)) {
1073                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1074                 dev_lim->max_srq_sz = 1 << field;
1075                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1076                 dev_lim->max_qp_sz = 1 << field;
1077                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
1078                 dev_lim->hca.arbel.resize_srq = field & 1;
1079                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
1080                 dev_lim->max_sg = min(field, dev_lim->max_sg);
1081                 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
1082                 dev_lim->max_desc_sz = min((int)size, dev_lim->max_desc_sz);            
1083                 MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
1084                 dev_lim->mpt_entry_sz = size;
1085                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
1086                 dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f);
1087                 MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox,
1088                           QUERY_DEV_LIM_BMME_FLAGS_OFFSET);
1089                 MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox,
1090                           QUERY_DEV_LIM_RSVD_LKEY_OFFSET);
1091                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET);
1092                 dev_lim->hca.arbel.lam_required = field & 1;
1093                 MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox,
1094                           QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET);
1095
1096                 if (dev_lim->hca.arbel.bmme_flags & 1){
1097                         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Base MM extensions: yes "
1098                                   "(flags %d, max PBL %d, rsvd L_Key %08x)\n",
1099                                   dev_lim->hca.arbel.bmme_flags,
1100                                   dev_lim->hca.arbel.max_pbl_sz,
1101                                   dev_lim->hca.arbel.reserved_lkey));
1102                 }else{
1103                         HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Base MM extensions: no\n"));
1104                 }
1105
1106                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max ICM size %I64d MB\n",
1107                           (u64) dev_lim->hca.arbel.max_icm_sz >> 20));
1108         } 
1109         else {
1110                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET);
1111                 dev_lim->max_srq_sz = (1 << field) - 1;
1112                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET);
1113                 dev_lim->max_qp_sz = (1 << field) - 1;
1114                 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
1115                 dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
1116                 dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
1117         }
1118
1119         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1120                   dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz));
1121         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1122                   dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz));
1123         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1124                   dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz));
1125         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max EQs: %d, reserved EQs: %d, entry size: %d\n",
1126                   dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz));
1127         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("reserved MPTs: %d, reserved MTTs: %d\n",
1128                   dev_lim->reserved_mrws, dev_lim->reserved_mtts));
1129         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1130                   dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars));
1131         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QP/MCG: %d, reserved MGMs: %d\n",
1132                   dev_lim->max_pds, dev_lim->reserved_mgms));
1133         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1134                   dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz));
1135
1136         HCA_PRINT(TRACE_LEVEL_VERBOSE  ,HCA_DBG_LOW  ,("Flags: %08x\n", dev_lim->flags));
1137
1138 out:
1139         mthca_free_mailbox(dev, mailbox);
1140         return err;
1141 }
1142
1143 static void get_board_id(u8 *vsd, char *board_id)
1144 {
1145         int i;
1146
1147 #define VSD_OFFSET_SIG1         0x00
1148 #define VSD_OFFSET_SIG2         0xde
1149 #define VSD_OFFSET_MLX_BOARD_ID 0xd0
1150 #define VSD_OFFSET_TS_BOARD_ID  0x20
1151
1152 #define VSD_SIGNATURE_TOPSPIN   0x5ad
1153
1154         RtlZeroMemory(board_id, MTHCA_BOARD_ID_LEN);
1155
1156         if (cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG1)) == VSD_SIGNATURE_TOPSPIN &&
1157             cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG2)) == VSD_SIGNATURE_TOPSPIN) {
1158                 strlcpy(board_id, (const char *)(vsd + VSD_OFFSET_TS_BOARD_ID), MTHCA_BOARD_ID_LEN);
1159         } else {
1160                 /*
1161                  * The board ID is a string but the firmware byte
1162                  * swaps each 4-byte word before passing it back to
1163                  * us.  Therefore we need to swab it before printing.
1164                  */
1165                 for (i = 0; i < 4; ++i)
1166                         ((u32 *) board_id)[i] =
1167                                 _byteswap_ulong(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
1168         }
1169 }
1170
1171 int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1172                         struct mthca_adapter *adapter, u8 *status)
1173 {
1174         struct mthca_mailbox *mailbox;
1175         u32 *outbox;
1176         int err;
1177
1178 #define QUERY_ADAPTER_OUT_SIZE             0x100
1179 #define QUERY_ADAPTER_VENDOR_ID_OFFSET     0x00
1180 #define QUERY_ADAPTER_DEVICE_ID_OFFSET     0x04
1181 #define QUERY_ADAPTER_REVISION_ID_OFFSET   0x08
1182 #define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
1183 #define QUERY_ADAPTER_VSD_OFFSET           0x20
1184
1185         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1186         if (IS_ERR(mailbox))
1187                 return PTR_ERR(mailbox);
1188         outbox = mailbox->buf;
1189
1190         err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
1191                             CMD_TIME_CLASS_A, status);
1192
1193         if (err)
1194                 goto out;
1195
1196         MTHCA_GET(adapter->vendor_id, outbox,   QUERY_ADAPTER_VENDOR_ID_OFFSET);
1197         MTHCA_GET(adapter->device_id, outbox,   QUERY_ADAPTER_DEVICE_ID_OFFSET);
1198         MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET);
1199         MTHCA_GET(adapter->inta_pin, outbox,    QUERY_ADAPTER_INTA_PIN_OFFSET);
1200
1201         get_board_id((u8*)outbox + QUERY_ADAPTER_VSD_OFFSET,
1202                      adapter->board_id);
1203
1204 out:
1205         mthca_free_mailbox(dev, mailbox);
1206         return err;
1207 }
1208
/*
 * INIT_HCA: build the HCA initialization parameter block in a mailbox
 * (context base addresses and log-sizes for the QP/EEC/SRQ/CQ/EQ
 * tables, multicast table layout, MPT/MTT layout, and UAR setup) and
 * hand it to firmware with the INIT_HCA command.
 *
 * The field offsets below are fixed by the hardware interface; the
 * MTHCA_PUT order mirrors the parameter block layout.
 *
 * Returns 0 or a negative error code from the command layer; the
 * firmware status byte is returned through *status.
 */
int mthca_INIT_HCA(struct mthca_dev *dev,
		   struct mthca_init_hca_param *param,
		   u8 *status)
{
	struct mthca_mailbox *mailbox;
	__be32 *inbox;
	int err;

#define INIT_HCA_IN_SIZE                 0x200
#define INIT_HCA_FLAGS_OFFSET            0x014
#define INIT_HCA_QPC_OFFSET              0x020
#define  INIT_HCA_QPC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x10)
#define  INIT_HCA_LOG_QP_OFFSET          (INIT_HCA_QPC_OFFSET + 0x17)
#define  INIT_HCA_EEC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x20)
#define  INIT_HCA_LOG_EEC_OFFSET         (INIT_HCA_QPC_OFFSET + 0x27)
#define  INIT_HCA_SRQC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x28)
#define  INIT_HCA_LOG_SRQ_OFFSET         (INIT_HCA_QPC_OFFSET + 0x2f)
#define  INIT_HCA_CQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x30)
#define  INIT_HCA_LOG_CQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x37)
#define  INIT_HCA_EQPC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x40)
#define  INIT_HCA_EEEC_BASE_OFFSET       (INIT_HCA_QPC_OFFSET + 0x50)
#define  INIT_HCA_EQC_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x60)
#define  INIT_HCA_LOG_EQ_OFFSET          (INIT_HCA_QPC_OFFSET + 0x67)
#define  INIT_HCA_RDB_BASE_OFFSET        (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_UDAV_OFFSET             0x0b0
#define  INIT_HCA_UDAV_LKEY_OFFSET       (INIT_HCA_UDAV_OFFSET + 0x0)
#define  INIT_HCA_UDAV_PD_OFFSET         (INIT_HCA_UDAV_OFFSET + 0x4)
#define INIT_HCA_MCAST_OFFSET            0x0c0
#define  INIT_HCA_MC_BASE_OFFSET         (INIT_HCA_MCAST_OFFSET + 0x00)
#define  INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12)
#define  INIT_HCA_MC_HASH_SZ_OFFSET      (INIT_HCA_MCAST_OFFSET + 0x16)
#define  INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_TPT_OFFSET              0x0f0
#define  INIT_HCA_MPT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x00)
#define  INIT_HCA_MTT_SEG_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x09)
#define  INIT_HCA_LOG_MPT_SZ_OFFSET      (INIT_HCA_TPT_OFFSET + 0x0b)
#define  INIT_HCA_MTT_BASE_OFFSET        (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_UAR_OFFSET              0x120
#define  INIT_HCA_UAR_BASE_OFFSET        (INIT_HCA_UAR_OFFSET + 0x00)
#define  INIT_HCA_UARC_SZ_OFFSET         (INIT_HCA_UAR_OFFSET + 0x09)
#define  INIT_HCA_LOG_UAR_SZ_OFFSET      (INIT_HCA_UAR_OFFSET + 0x0a)
#define  INIT_HCA_UAR_PAGE_SZ_OFFSET     (INIT_HCA_UAR_OFFSET + 0x0b)
#define  INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10)
#define  INIT_HCA_UAR_CTX_BASE_OFFSET    (INIT_HCA_UAR_OFFSET + 0x18)

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	RtlZeroMemory(inbox, INIT_HCA_IN_SIZE);

	/*
	 * Bit 1 of the flags word selects big-endian mode: set it on
	 * big-endian hosts, clear it on little-endian hosts (a no-op
	 * after the zero-fill above, kept for explicitness).
	 */
#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cl_hton32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1);

	/* We leave wqe_quota, responder_exu, etc as 0 (default) */

	/* QPC/EEC/CQC/EQC/RDB attributes */

	MTHCA_PUT(inbox, param->qpc_base,     INIT_HCA_QPC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_qps,  INIT_HCA_LOG_QP_OFFSET);
	MTHCA_PUT(inbox, param->eec_base,     INIT_HCA_EEC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET);
	MTHCA_PUT(inbox, param->srqc_base,    INIT_HCA_SRQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
	MTHCA_PUT(inbox, param->cqc_base,     INIT_HCA_CQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_cqs,  INIT_HCA_LOG_CQ_OFFSET);
	MTHCA_PUT(inbox, param->eqpc_base,    INIT_HCA_EQPC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->eeec_base,    INIT_HCA_EEEC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->eqc_base,     INIT_HCA_EQC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_num_eqs,  INIT_HCA_LOG_EQ_OFFSET);
	MTHCA_PUT(inbox, param->rdb_base,     INIT_HCA_RDB_BASE_OFFSET);

	/* UD AV attributes */

	/* multicast attributes */

	MTHCA_PUT(inbox, param->mc_base,         INIT_HCA_MC_BASE_OFFSET);
	MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
	MTHCA_PUT(inbox, param->mc_hash_sz,      INIT_HCA_MC_HASH_SZ_OFFSET);
	MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);

	/* TPT attributes */

	MTHCA_PUT(inbox, param->mpt_base,   INIT_HCA_MPT_BASE_OFFSET);
	/* MTT segment size is only written for non-memfree HCAs. */
	if (!mthca_is_memfree(dev))
		MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
	MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MTHCA_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);

	/* UAR attributes */
	{
		/* UAR page size is encoded as log2(page size) - 12. */
		u8 uar_page_sz = PAGE_SHIFT - 12;
		MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	}

	MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);

	/* UAR context parameters exist only on memfree (Arbel) HCAs. */
	if (mthca_is_memfree(dev)) {
		MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
		MTHCA_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);
		MTHCA_PUT(inbox, param->uarc_base,   INIT_HCA_UAR_CTX_BASE_OFFSET);
	}

	err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status);

	mthca_free_mailbox(dev, mailbox);
	return err;
}
1325
1326 int mthca_INIT_IB(struct mthca_dev *dev,
1327                   struct mthca_init_ib_param *param,
1328                   int port, u8 *status)
1329 {
1330         struct mthca_mailbox *mailbox;
1331         u32 *inbox;
1332         int err;
1333         u32 flags;
1334
1335 #define INIT_IB_IN_SIZE                                         56
1336 #define INIT_IB_FLAGS_OFFSET                    0x00
1337 #define INIT_IB_FLAG_SIG                                        (1 << 18)
1338 #define INIT_IB_FLAG_NG                                         (1 << 17)
1339 #define INIT_IB_FLAG_G0                                         (1 << 16)
1340 #define INIT_IB_VL_SHIFT                                        4
1341 #define INIT_IB_PORT_WIDTH_SHIFT        8
1342 #define INIT_IB_MTU_SHIFT                               12
1343 #define INIT_IB_MAX_GID_OFFSET                  0x06
1344 #define INIT_IB_MAX_PKEY_OFFSET         0x0a
1345 #define INIT_IB_GUID0_OFFSET                    0x10
1346 #define INIT_IB_NODE_GUID_OFFSET        0x18
1347 #define INIT_IB_SI_GUID_OFFSET                  0x20
1348
1349         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1350         if (IS_ERR(mailbox))
1351                 return PTR_ERR(mailbox);
1352         inbox = mailbox->buf;
1353
1354         RtlZeroMemory(inbox, INIT_IB_IN_SIZE);
1355
1356         flags = 0;
1357         flags |= param->set_guid0     ? INIT_IB_FLAG_G0  : 0;
1358         flags |= param->set_node_guid ? INIT_IB_FLAG_NG  : 0;
1359         flags |= param->set_si_guid   ? INIT_IB_FLAG_SIG : 0;
1360         flags |= param->vl_cap << INIT_IB_VL_SHIFT;
1361         flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT;
1362         flags |= param->mtu_cap << INIT_IB_MTU_SHIFT;
1363         MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET);
1364
1365         MTHCA_PUT(inbox, param->gid_cap,   INIT_IB_MAX_GID_OFFSET);
1366         MTHCA_PUT(inbox, param->pkey_cap,  INIT_IB_MAX_PKEY_OFFSET);
1367         MTHCA_PUT(inbox, param->guid0,     INIT_IB_GUID0_OFFSET);
1368         MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET);
1369         MTHCA_PUT(inbox, param->si_guid,   INIT_IB_SI_GUID_OFFSET);
1370
1371         err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
1372                         CMD_TIME_CLASS_A, status);
1373
1374         mthca_free_mailbox(dev, mailbox);
1375         return err;
1376 }
1377
1378 int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status)
1379 {
1380         return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status);
1381 }
1382
1383 int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
1384 {
1385         return mthca_cmd(dev, 0, 0, (u8)panic, CMD_CLOSE_HCA, HZ, status);
1386 }
1387
1388 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1389                  int port, u8 *status)
1390 {
1391         struct mthca_mailbox *mailbox;
1392         u32 *inbox;
1393         int err;
1394         u32 flags = 0;
1395
1396 #define SET_IB_IN_SIZE         0x40
1397 #define SET_IB_FLAGS_OFFSET    0x00
1398 #define SET_IB_FLAG_SIG        (1 << 18)
1399 #define SET_IB_FLAG_RQK        (1 <<  0)
1400 #define SET_IB_CAP_MASK_OFFSET 0x04
1401 #define SET_IB_SI_GUID_OFFSET  0x08
1402
1403         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1404         if (IS_ERR(mailbox))
1405                 return PTR_ERR(mailbox);
1406         inbox = mailbox->buf;
1407
1408         RtlZeroMemory(inbox, SET_IB_IN_SIZE);
1409
1410         flags |= param->set_si_guid     ? SET_IB_FLAG_SIG : 0;
1411         flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0;
1412         MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET);
1413
1414         MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET);
1415         MTHCA_PUT(inbox, param->si_guid,  SET_IB_SI_GUID_OFFSET);
1416
1417         err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
1418                         CMD_TIME_CLASS_B, status);
1419
1420         mthca_free_mailbox(dev, mailbox);
1421         return err;
1422 }
1423
1424 int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
1425 {
1426         return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
1427 }
1428
1429 int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
1430 {
1431         struct mthca_mailbox *mailbox;
1432         __be64 *inbox;
1433         int err;
1434
1435         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
1436         if (IS_ERR(mailbox))
1437                 return PTR_ERR(mailbox);
1438         inbox = mailbox->buf;
1439
1440         inbox[0] = cl_hton64(virt);
1441         inbox[1] = cl_hton64(dma_addr);
1442
1443         err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
1444                         CMD_TIME_CLASS_B, status);
1445
1446         mthca_free_mailbox(dev, mailbox);
1447
1448         if (!err)
1449                 HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped page at %I64x to %I64x for ICM.\n",
1450                           (u64) dma_addr, (u64) virt));
1451
1452         return err;
1453 }
1454
1455 int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
1456 {
1457         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Unmapping %d pages at %I64x from ICM.\n",
1458                   page_count, (u64) virt));
1459
1460         return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
1461 }
1462
1463 int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
1464 {
1465         return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, (u64)-1, status);
1466 }
1467
1468 int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
1469 {
1470         return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
1471 }
1472
1473 int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
1474                        u8 *status)
1475 {
1476         int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
1477                                 CMD_TIME_CLASS_A, status);
1478
1479         if (ret || status)
1480                 return ret;
1481
1482         /*
1483          * Arbel page size is always 4 KB; round up number of system
1484          * pages needed.
1485          */
1486         *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
1487         *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
1488
1489         return 0;
1490 }
1491
1492 int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1493                     int mpt_index, u8 *status)
1494 {
1495         return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
1496                          CMD_TIME_CLASS_B, status);
1497 }
1498
1499 int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1500                     int mpt_index, u8 *status)
1501 {
1502         return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
1503                              (u8)!mailbox, CMD_HW2SW_MPT,
1504                              CMD_TIME_CLASS_B, status);
1505 }
1506
1507 int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1508                     int num_mtt, u8 *status)
1509 {
1510         return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
1511                          CMD_TIME_CLASS_B, status);
1512 }
1513
1514 int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
1515 {
1516         return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status);
1517 }
1518
1519 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
1520                  int eq_num, u8 *status)
1521 {
1522         HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("%s mask %016I64x for eqn %d\n",
1523                   unmap ? "Clearing" : "Setting",
1524                   (u64) event_mask, eq_num));
1525         return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
1526                          0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
1527 }
1528
1529 int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1530                    int eq_num, u8 *status)
1531 {
1532         return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
1533                          CMD_TIME_CLASS_A, status);
1534 }
1535
1536 int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1537                    int eq_num, u8 *status)
1538 {
1539         return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
1540                              CMD_HW2SW_EQ,
1541                              CMD_TIME_CLASS_A, status);
1542 }
1543
1544 int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1545                    int cq_num, u8 *status)
1546 {
1547         return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
1548                         CMD_TIME_CLASS_A, status);
1549 }
1550
1551 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1552                    int cq_num, u8 *status)
1553 {
1554         return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
1555                              CMD_HW2SW_CQ,
1556                              CMD_TIME_CLASS_A, status);
1557 }
1558
1559 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1560                     int srq_num, u8 *status)
1561 {
1562         return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
1563                         CMD_TIME_CLASS_A, status);
1564 }
1565
1566 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1567                     int srq_num, u8 *status)
1568 {
1569         return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
1570                              CMD_HW2SW_SRQ,
1571                              CMD_TIME_CLASS_A, status);
1572 }
1573
1574 int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
1575 {
1576         return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
1577                          CMD_TIME_CLASS_B, status);
1578 }
1579
/*
 * Transition a QP (or EE context, when is_ee != 0) between states by
 * issuing the *_QPEE firmware command selected by 'trans'.  'mailbox'
 * carries the QP context to apply; for the ANY2RST transition it may be
 * NULL, in which case a temporary mailbox is allocated just to capture
 * the final context for debug dumping.  'optmask' is OR-ed into the
 * input modifier for non-reset transitions.  Returns 0 on success with
 * the firmware status in *status, or a negative errno.
 */
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
		    int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
		    u8 *status)
{
	/* Indices into op[] below -- the two tables must stay in sync. */
	enum {
		MTHCA_TRANS_INVALID = 0,
		MTHCA_TRANS_RST2INIT,
		MTHCA_TRANS_INIT2INIT,
		MTHCA_TRANS_INIT2RTR,
		MTHCA_TRANS_RTR2RTS,
		MTHCA_TRANS_RTS2RTS,
		MTHCA_TRANS_SQERR2RTS,
		MTHCA_TRANS_ANY2ERR,
		MTHCA_TRANS_RTS2SQD,
		MTHCA_TRANS_SQD2SQD,
		MTHCA_TRANS_SQD2RTS,
		MTHCA_TRANS_ANY2RST,
	};
	/* Firmware opcode for each transition. */
	static const u16 op[] = {
		0,			/* MTHCA_TRANS_INVALID */
		CMD_RST2INIT_QPEE,	/* MTHCA_TRANS_RST2INIT */
		CMD_INIT2INIT_QPEE,	/* MTHCA_TRANS_INIT2INIT */
		CMD_INIT2RTR_QPEE,	/* MTHCA_TRANS_INIT2RTR */
		CMD_RTR2RTS_QPEE,	/* MTHCA_TRANS_RTR2RTS */
		CMD_RTS2RTS_QPEE,	/* MTHCA_TRANS_RTS2RTS */
		CMD_SQERR2RTS_QPEE,	/* MTHCA_TRANS_SQERR2RTS */
		CMD_2ERR_QPEE,		/* MTHCA_TRANS_ANY2ERR */
		CMD_RTS2SQD_QPEE,	/* MTHCA_TRANS_RTS2SQD */
		CMD_SQD2SQD_QPEE,	/* MTHCA_TRANS_SQD2SQD */
		CMD_SQD2RTS_QPEE,	/* MTHCA_TRANS_SQD2RTS */
		CMD_ERR2RST_QPEE	/* MTHCA_TRANS_ANY2RST */
	};
	u8 op_mod = 0;
	/* Set when we allocate 'mailbox' ourselves and must free it. */
	int my_mailbox = 0;
	int err;

	/* Harmless: optmask IS used on the non-reset path below; this just
	 * silences the compiler warning for the reset path. */
	UNREFERENCED_PARAMETER(optmask);

	if (trans < 0 || trans >= ARRAY_SIZE(op))
		return -EINVAL;

	if (trans == MTHCA_TRANS_ANY2RST) {
		op_mod = 3;	/* don't write outbox, any->reset */

		/* For debugging */
		if (!mailbox) {
			mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
			if (!IS_ERR(mailbox)) {
				my_mailbox = 1;
				op_mod     = 2;	/* write outbox, any->reset */
			} else
				mailbox = NULL;	/* best effort: reset without dump */
		}
	} else {
		/* NOTE(review): this path dereferences 'mailbox'
		 * unconditionally -- callers must pass a non-NULL mailbox
		 * for every transition other than ANY2RST. */
		{ // debug print
			int i;
			HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n"));
			HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  opt param mask: %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf)));
			/* Dump 4 dwords per line, skipping the 8-byte header. */
			for (i = 2; i < 0x100 / 4; i=i+4) {
				HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  [%02x] %08x %08x %08x %08x\n",i-2,
							cl_ntoh32(((__be32 *) mailbox->buf)[i ]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 3])));
			}
		}
	}

	if (trans == MTHCA_TRANS_ANY2RST) {
		/* Reset path: context (if any) comes OUT of the firmware. */
		err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
				    (!!is_ee << 24) | num, op_mod,
				    op[trans], CMD_TIME_CLASS_C, status);

		if (mailbox) { // debug print
			int i;
			HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n"));
			for (i = 2; i < 0x100 / 4; i=i+4) {
				HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("  [%02x] %08x %08x %08x %08x\n",i-2,
							cl_ntoh32(((__be32 *) mailbox->buf)[i ]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]),
							cl_ntoh32(((__be32 *) mailbox->buf)[i + 3])));
			}
		}
	} else
		/* Non-reset path: context goes IN via the mailbox; bit 24
		 * of the input modifier selects EE vs QP. */
		err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
				op_mod, op[trans], CMD_TIME_CLASS_C, status);

	if (my_mailbox)
		mthca_free_mailbox(dev, mailbox);

	return err;
}
1673
1674 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
1675                    struct mthca_mailbox *mailbox, u8 *status)
1676 {
1677         return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
1678                              CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
1679 }
1680
1681 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
1682                           u8 *status)
1683 {
1684         u8 op_mod;
1685
1686         switch (type) {
1687         case IB_QPT_QP0:
1688                 op_mod = 0;
1689                 break;
1690         case IB_QPT_QP1:
1691                 op_mod = 1;
1692                 break;
1693         case IB_QPT_RAW_IPV6:
1694                 op_mod = 2;
1695                 break;
1696         case IB_QPT_RAW_ETHER:
1697                 op_mod = 3;
1698                 break;
1699         default:
1700                 return -EINVAL;
1701         }
1702
1703         return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
1704                          CMD_TIME_CLASS_B, status);
1705 }
1706
/*
 * Execute a MAD (management datagram) through the firmware's MAD_IFC
 * command: the 256-byte MAD in 'in_mad' is staged in a DMA mailbox,
 * processed for 'port', and the 256-byte reply is copied back into
 * 'response_mad'.  ignore_mkey/ignore_bkey suppress M_Key/B_Key checks.
 * Returns 0 on success with the firmware status in *status, or a
 * negative errno on mailbox allocation failure.
 *
 * NOTE(review): this port never supplies a completion context -- in_wc
 * is asserted NULL and in_grh is unused -- so both op_modifier bits are
 * always set below and key-violation traps can never be generated.
 */
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
		  int port, struct _ib_wc *in_wc, struct ib_grh *in_grh,
		  void *in_mad, void *response_mad, u8 *status)
{
	struct mthca_mailbox *inmailbox, *outmailbox;
	u8 *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	ASSERT( !in_wc );
	UNREFERENCED_PARAMETER( in_grh );

	/* Input-mailbox layout.  The 0x100+ offsets describe where the WC
	 * and GRH information would be written if in_wc were supported;
	 * presumably kept for a future in_wc path -- only the first 256
	 * bytes (the MAD itself) are filled in here. */
#define MAD_IFC_BOX_SIZE      0x400
#define MAD_IFC_MY_QPN_OFFSET 0x100
#define MAD_IFC_RQPN_OFFSET   0x104
#define MAD_IFC_SL_OFFSET     0x108
#define MAD_IFC_G_PATH_OFFSET 0x109
#define MAD_IFC_RLID_OFFSET   0x10a
#define MAD_IFC_PKEY_OFFSET   0x10e
#define MAD_IFC_GRH_OFFSET    0x140

	inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(outmailbox)) {
		/* Don't leak the input mailbox on partial failure. */
		mthca_free_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	/* A MAD is always 256 bytes. */
	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
			    in_modifier, op_modifier,
			    CMD_MAD_IFC, CMD_TIME_CLASS_C, status);

	/* Copy the reply only if both the transport and the firmware
	 * report success. */
	if (!err && !*status)
		memcpy(response_mad, outmailbox->buf, 256);

	mthca_free_mailbox(dev, inmailbox);
	mthca_free_mailbox(dev, outmailbox);
	return err;
}
1762
1763 int mthca_READ_MGM(struct mthca_dev *dev, int index,
1764                    struct mthca_mailbox *mailbox, u8 *status)
1765 {
1766         return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
1767                              CMD_READ_MGM, CMD_TIME_CLASS_A, status);
1768 }
1769
1770 int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
1771                     struct mthca_mailbox *mailbox, u8 *status)
1772 {
1773         return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
1774                          CMD_TIME_CLASS_A, status);
1775 }
1776
1777 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1778                     u16 *hash, u8 *status)
1779 {
1780         u64 imm;
1781         int err;
1782
1783         err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
1784                             CMD_TIME_CLASS_A, status);
1785
1786         *hash = (u16)imm;
1787         return err;
1788 }
1789
1790 int mthca_NOP(struct mthca_dev *dev, u8 *status)
1791 {
1792         return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, 100000, status);     /* 100 msecs */
1793 }