/*
 * Copyright (C) 2008 Michael Brown <mbrown@fensystems.co.uk>.
 * Copyright (C) 2008 Mellanox Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <gpxe/pci.h>
#include <gpxe/malloc.h>
#include <gpxe/umalloc.h>
#include <gpxe/iobuf.h>
#include <gpxe/netdevice.h>
#include <gpxe/infiniband.h>
#include "hermon.h"
/**
 * @file
 *
 * Mellanox Hermon Infiniband HCA
 *
 */
/***************************************************************************
 *
 * Queue number allocation
 *
 ***************************************************************************
 */
/**
 * Allocate offsets within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bits_len		Length of usage bitmask
 * @v num_bits		Number of contiguous bits to allocate within bitmask
 * @ret bit		First free bit within bitmask, or negative error
 */
static int hermon_bitmask_alloc ( hermon_bitmask_t *bits,
				  unsigned int bits_len,
				  unsigned int num_bits ) {
	unsigned int bit = 0;
	hermon_bitmask_t mask = 1;
	unsigned int found = 0;

	/* Search bits for num_bits contiguous free bits */
	while ( bit < bits_len ) {
		if ( ( mask & *bits ) == 0 ) {
			if ( ++found == num_bits )
				goto found;
		} else {
			found = 0;
		}
		bit++;
		mask = ( mask << 1 ) | ( mask >> ( 8 * sizeof ( mask ) - 1 ) );
		if ( mask == 1 )
			bits++;
	}
	return -ENOSPC;

 found:
	/* Mark bits as in-use */
	do {
		*bits |= mask;
		if ( mask == 1 )
			bits--;
		mask = ( mask >> 1 ) | ( mask << ( 8 * sizeof ( mask ) - 1 ) );
	} while ( --found );

	return ( bit - num_bits + 1 );
}
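/* Worked example ( illustrative, not from the hardware spec ): allocating
 * num_bits = 2 from a bitmask whose low word ends ...1001 finds bit 0 in
 * use, then bits 1 and 2 free; the search jumps to the marking loop with
 * bit == 2 and the function returns ( 2 - 2 + 1 ) = 1, while the marking
 * loop rotates the mask back down, flagging bits 2 and 1 as in-use.
 */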
/**
 * Free offsets within usage bitmask
 *
 * @v bits		Usage bitmask
 * @v bit		Starting bit within bitmask
 * @v num_bits		Number of contiguous bits to free within bitmask
 */
static void hermon_bitmask_free ( hermon_bitmask_t *bits,
				  int bit, unsigned int num_bits ) {
	hermon_bitmask_t mask;

	for ( ; num_bits ; bit++, num_bits-- ) {
		mask = ( 1 << ( bit % ( 8 * sizeof ( mask ) ) ) );
		bits[ ( bit / ( 8 * sizeof ( mask ) ) ) ] &= ~mask;
	}
}
/***************************************************************************
 *
 * HCA commands
 *
 ***************************************************************************
 */
/**
 * Wait for Hermon command completion
 *
 * @v hermon		Hermon device
 * @v hcr		HCA command registers
 * @ret rc		Return status code
 */
static int hermon_cmd_wait ( struct hermon *hermon,
			     struct hermonprm_hca_command_register *hcr ) {
	unsigned int wait;

	for ( wait = HERMON_HCR_MAX_WAIT_MS ; wait ; wait-- ) {
		hcr->u.dwords[6] =
			readl ( hermon->config + HERMON_HCR_REG ( 6 ) );
		if ( ( MLX_GET ( hcr, go ) == 0 ) &&
		     ( MLX_GET ( hcr, t ) == hermon->toggle ) )
			return 0;
		mdelay ( 1 );
	}
	return -EBUSY;
}
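/* Note ( illustrative ): the hardware clears the "go" bit once it has
 * finished executing a command, and the "t" bit echoes the toggle value
 * the driver wrote when the command was issued; checking both prevents a
 * stale, previously-completed HCR image from being mistaken for the
 * completion of the current command.
 */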
/**
 * Issue HCA command
 *
 * @v hermon		Hermon device
 * @v command		Command opcode, flags and input/output lengths
 * @v op_mod		Opcode modifier (0 if no modifier applicable)
 * @v in		Input parameters
 * @v in_mod		Input modifier (0 if no modifier applicable)
 * @v out		Output parameters
 * @ret rc		Return status code
 */
static int hermon_cmd ( struct hermon *hermon, unsigned long command,
			unsigned int op_mod, const void *in,
			unsigned int in_mod, void *out ) {
	struct hermonprm_hca_command_register hcr;
	unsigned int opcode = HERMON_HCR_OPCODE ( command );
	size_t in_len = HERMON_HCR_IN_LEN ( command );
	size_t out_len = HERMON_HCR_OUT_LEN ( command );
	void *in_buffer;
	void *out_buffer;
	unsigned int status;
	unsigned int i;
	int rc;

	assert ( in_len <= HERMON_MBOX_SIZE );
	assert ( out_len <= HERMON_MBOX_SIZE );

	DBGC2 ( hermon, "Hermon %p command %02x in %zx%s out %zx%s\n",
		hermon, opcode, in_len,
		( ( command & HERMON_HCR_IN_MBOX ) ? "(mbox)" : "" ), out_len,
		( ( command & HERMON_HCR_OUT_MBOX ) ? "(mbox)" : "" ) );

	/* Check that HCR is free */
	if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p command interface locked\n",
		       hermon );
		return rc;
	}

	/* Flip HCR toggle */
	hermon->toggle = ( 1 - hermon->toggle );

	/* Prepare HCR */
	memset ( &hcr, 0, sizeof ( hcr ) );
	in_buffer = &hcr.u.dwords[0];
	if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
		in_buffer = hermon->mailbox_in;
		MLX_FILL_1 ( &hcr, 1, in_param_l, virt_to_bus ( in_buffer ) );
	}
	memcpy ( in_buffer, in, in_len );
	MLX_FILL_1 ( &hcr, 2, input_modifier, in_mod );
	out_buffer = &hcr.u.dwords[3];
	if ( out_len && ( command & HERMON_HCR_OUT_MBOX ) ) {
		out_buffer = hermon->mailbox_out;
		MLX_FILL_1 ( &hcr, 4, out_param_l,
			     virt_to_bus ( out_buffer ) );
	}
	MLX_FILL_4 ( &hcr, 6,
		     opcode, opcode,
		     opcode_modifier, op_mod,
		     go, 1,
		     t, hermon->toggle );
	DBGC ( hermon, "Hermon %p issuing command:\n", hermon );
	DBGC_HDA ( hermon, virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
		   &hcr, sizeof ( hcr ) );
	if ( in_len && ( command & HERMON_HCR_IN_MBOX ) ) {
		DBGC2 ( hermon, "Input mailbox:\n" );
		DBGC2_HDA ( hermon, virt_to_phys ( in_buffer ), in_buffer,
			    ( ( in_len < 512 ) ? in_len : 512 ) );
	}

	/* Issue command */
	for ( i = 0 ; i < ( sizeof ( hcr ) / sizeof ( hcr.u.dwords[0] ) ) ;
	      i++ ) {
		writel ( hcr.u.dwords[i],
			 hermon->config + HERMON_HCR_REG ( i ) );
		barrier();
	}

	/* Wait for command completion */
	if ( ( rc = hermon_cmd_wait ( hermon, &hcr ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p timed out waiting for command:\n",
		       hermon );
		DBGC_HDA ( hermon,
			   virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
			   &hcr, sizeof ( hcr ) );
		return rc;
	}

	/* Check command status */
	status = MLX_GET ( &hcr, status );
	if ( status != 0 ) {
		DBGC ( hermon, "Hermon %p command failed with status %02x:\n",
		       hermon, status );
		DBGC_HDA ( hermon,
			   virt_to_phys ( hermon->config + HERMON_HCR_BASE ),
			   &hcr, sizeof ( hcr ) );
		return -EIO;
	}

	/* Read output parameters, if any */
	hcr.u.dwords[3] = readl ( hermon->config + HERMON_HCR_REG ( 3 ) );
	hcr.u.dwords[4] = readl ( hermon->config + HERMON_HCR_REG ( 4 ) );
	memcpy ( out, out_buffer, out_len );
	DBGC2 ( hermon, "Output%s:\n",
		( command & HERMON_HCR_OUT_MBOX ) ? " mailbox" : "" );
	DBGC2_HDA ( hermon, virt_to_phys ( out_buffer ), out_buffer,
		    ( ( out_len < 512 ) ? out_len : 512 ) );

	return 0;
}
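/* Note ( illustrative ): the HERMON_HCR_*_CMD() macros used by the
 * wrappers below pack the opcode together with the input/output lengths
 * and mailbox flags into the single "command" word that hermon_cmd()
 * decodes above via HERMON_HCR_OPCODE(), HERMON_HCR_IN_LEN() and
 * HERMON_HCR_OUT_LEN().
 */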
static inline int
hermon_cmd_query_dev_cap ( struct hermon *hermon,
			   struct hermonprm_query_dev_cap *dev_cap ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_DEV_CAP,
						 1, sizeof ( *dev_cap ) ),
			    0, NULL, 0, dev_cap );
}

static inline int
hermon_cmd_query_fw ( struct hermon *hermon, struct hermonprm_query_fw *fw ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_QUERY_FW,
						 1, sizeof ( *fw ) ),
			    0, NULL, 0, fw );
}

static inline int
hermon_cmd_init_hca ( struct hermon *hermon,
		      const struct hermonprm_init_hca *init_hca ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_HCA,
						1, sizeof ( *init_hca ) ),
			    0, init_hca, 0, NULL );
}

static inline int
hermon_cmd_close_hca ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_HCA ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_init_port ( struct hermon *hermon, unsigned int port,
		       const struct hermonprm_init_port *init_port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT_PORT,
						1, sizeof ( *init_port ) ),
			    0, init_port, port, NULL );
}

static inline int
hermon_cmd_close_port ( struct hermon *hermon, unsigned int port ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_CLOSE_PORT ),
			    0, NULL, port, NULL );
}

static inline int
hermon_cmd_sw2hw_mpt ( struct hermon *hermon, unsigned int index,
		       const struct hermonprm_mpt *mpt ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_MPT,
						1, sizeof ( *mpt ) ),
			    0, mpt, index, NULL );
}

static inline int
hermon_cmd_write_mtt ( struct hermon *hermon,
		       const struct hermonprm_write_mtt *write_mtt ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MTT,
						1, sizeof ( *write_mtt ) ),
			    0, write_mtt, 1, NULL );
}

static inline int
hermon_cmd_sw2hw_eq ( struct hermon *hermon, unsigned int index,
		      const struct hermonprm_eqc *eqc ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_EQ,
						1, sizeof ( *eqc ) ),
			    0, eqc, index, NULL );
}

static inline int
hermon_cmd_hw2sw_eq ( struct hermon *hermon, unsigned int index ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_HW2SW_EQ ),
			    1, NULL, index, NULL );
}

static inline int
hermon_cmd_sw2hw_cq ( struct hermon *hermon, unsigned long cqn,
		      const struct hermonprm_completion_queue_context *cqctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_SW2HW_CQ,
						1, sizeof ( *cqctx ) ),
			    0, cqctx, cqn, NULL );
}

static inline int
hermon_cmd_hw2sw_cq ( struct hermon *hermon, unsigned long cqn,
		      struct hermonprm_completion_queue_context *cqctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_HW2SW_CQ,
						 1, sizeof ( *cqctx ) ),
			    0, NULL, cqn, cqctx );
}

static inline int
hermon_cmd_rst2init_qp ( struct hermon *hermon, unsigned long qpn,
			 const struct hermonprm_qp_ee_state_transitions *ctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RST2INIT_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_init2rtr_qp ( struct hermon *hermon, unsigned long qpn,
			 const struct hermonprm_qp_ee_state_transitions *ctx ){
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_INIT2RTR_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_rtr2rts_qp ( struct hermon *hermon, unsigned long qpn,
			const struct hermonprm_qp_ee_state_transitions *ctx ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_RTR2RTS_QP,
						1, sizeof ( *ctx ) ),
			    0, ctx, qpn, NULL );
}

static inline int
hermon_cmd_2rst_qp ( struct hermon *hermon, unsigned long qpn ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_2RST_QP ),
			    0x03, NULL, qpn, NULL );
}

static inline int
hermon_cmd_mad_ifc ( struct hermon *hermon, unsigned int port,
		     union hermonprm_mad *mad ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MAD_IFC,
						   1, sizeof ( *mad ),
						   1, sizeof ( *mad ) ),
			    0x03, mad, port, mad );
}

static inline int
hermon_cmd_read_mcg ( struct hermon *hermon, unsigned int index,
		      struct hermonprm_mcg_entry *mcg ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_OUT_CMD ( HERMON_HCR_READ_MCG,
						 1, sizeof ( *mcg ) ),
			    0, NULL, index, mcg );
}

static inline int
hermon_cmd_write_mcg ( struct hermon *hermon, unsigned int index,
		       const struct hermonprm_mcg_entry *mcg ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_WRITE_MCG,
						1, sizeof ( *mcg ) ),
			    0, mcg, index, NULL );
}

static inline int
hermon_cmd_mgid_hash ( struct hermon *hermon, const struct ib_gid *gid,
		       struct hermonprm_mgm_hash *hash ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_MGID_HASH,
						   1, sizeof ( *gid ),
						   0, sizeof ( *hash ) ),
			    0, gid, 0, hash );
}

static inline int
hermon_cmd_run_fw ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_RUN_FW ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_unmap_icm ( struct hermon *hermon, unsigned int page_count,
		       const struct hermonprm_scalar_parameter *offset ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_UNMAP_ICM,
						0, sizeof ( *offset ) ),
			    0, offset, page_count, NULL );
}

static inline int
hermon_cmd_map_icm ( struct hermon *hermon,
		     const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_unmap_icm_aux ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_ICM_AUX ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_map_icm_aux ( struct hermon *hermon,
		       const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_ICM_AUX,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}

static inline int
hermon_cmd_set_icm_size ( struct hermon *hermon,
			  const struct hermonprm_scalar_parameter *icm_size,
			  struct hermonprm_scalar_parameter *icm_aux_size ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_INOUT_CMD ( HERMON_HCR_SET_ICM_SIZE,
						   0, sizeof ( *icm_size ),
						   0, sizeof ( *icm_aux_size ) ),
			    0, icm_size, 0, icm_aux_size );
}

static inline int
hermon_cmd_unmap_fa ( struct hermon *hermon ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_VOID_CMD ( HERMON_HCR_UNMAP_FA ),
			    0, NULL, 0, NULL );
}

static inline int
hermon_cmd_map_fa ( struct hermon *hermon,
		    const struct hermonprm_virtual_physical_mapping *map ) {
	return hermon_cmd ( hermon,
			    HERMON_HCR_IN_CMD ( HERMON_HCR_MAP_FA,
						1, sizeof ( *map ) ),
			    0, map, 1, NULL );
}
/***************************************************************************
 *
 * Memory translation table operations
 *
 ***************************************************************************
 */
/**
 * Allocate MTT entries
 *
 * @v hermon		Hermon device
 * @v memory		Memory to map into MTT
 * @v len		Length of memory to map
 * @v mtt		MTT descriptor to fill in
 * @ret rc		Return status code
 */
static int hermon_alloc_mtt ( struct hermon *hermon,
			      const void *memory, size_t len,
			      struct hermon_mtt *mtt ) {
	struct hermonprm_write_mtt write_mtt;
	physaddr_t start;
	unsigned int page_offset;
	unsigned int num_pages;
	int mtt_offset;
	unsigned int mtt_base_addr;
	unsigned int i;
	int rc;

	/* Find available MTT entries */
	start = virt_to_phys ( memory );
	page_offset = ( start & ( HERMON_PAGE_SIZE - 1 ) );
	start -= page_offset;
	len += page_offset;
	num_pages = ( ( len + HERMON_PAGE_SIZE - 1 ) / HERMON_PAGE_SIZE );
	mtt_offset = hermon_bitmask_alloc ( hermon->mtt_inuse, HERMON_MAX_MTTS,
					    num_pages );
	if ( mtt_offset < 0 ) {
		DBGC ( hermon, "Hermon %p could not allocate %d MTT entries\n",
		       hermon, num_pages );
		rc = mtt_offset;
		goto err_mtt_offset;
	}
	mtt_base_addr = ( ( hermon->cap.reserved_mtts + mtt_offset ) *
			  hermon->cap.mtt_entry_size );

	/* Fill in MTT structure */
	mtt->mtt_offset = mtt_offset;
	mtt->num_pages = num_pages;
	mtt->mtt_base_addr = mtt_base_addr;
	mtt->page_offset = page_offset;

	/* Construct and issue WRITE_MTT commands */
	for ( i = 0 ; i < num_pages ; i++ ) {
		memset ( &write_mtt, 0, sizeof ( write_mtt ) );
		MLX_FILL_1 ( &write_mtt.mtt_base_addr, 1,
			     value, mtt_base_addr );
		MLX_FILL_2 ( &write_mtt.mtt, 1,
			     p, 1,
			     ptag_l, ( start >> 3 ) );
		if ( ( rc = hermon_cmd_write_mtt ( hermon,
						   &write_mtt ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not write MTT at %x\n",
			       hermon, mtt_base_addr );
			goto err_write_mtt;
		}
		start += HERMON_PAGE_SIZE;
		mtt_base_addr += hermon->cap.mtt_entry_size;
	}

	return 0;

 err_write_mtt:
	hermon_bitmask_free ( hermon->mtt_inuse, mtt_offset, num_pages );
 err_mtt_offset:
	return rc;
}
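/* Example ( illustrative ): a 0x2000-byte buffer starting at physical
 * address 0x12345 has page_offset 0x345; after aligning start down to
 * 0x12000 and adding the offset to len, num_pages is
 * ( 0x2345 + 0xfff ) / 0x1000 = 3, so three MTT entries are written.
 */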
/**
 * Free MTT entries
 *
 * @v hermon		Hermon device
 * @v mtt		MTT descriptor
 */
static void hermon_free_mtt ( struct hermon *hermon,
			      struct hermon_mtt *mtt ) {
	hermon_bitmask_free ( hermon->mtt_inuse, mtt->mtt_offset,
			      mtt->num_pages );
}
/***************************************************************************
 *
 * Completion queue operations
 *
 ***************************************************************************
 */
/**
 * Create completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @ret rc		Return status code
 */
static int hermon_create_cq ( struct ib_device *ibdev,
			      struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq;
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	unsigned int i;
	int rc;

	/* Find a free completion queue number */
	cqn_offset = hermon_bitmask_alloc ( hermon->cq_inuse,
					    HERMON_MAX_CQS, 1 );
	if ( cqn_offset < 0 ) {
		DBGC ( hermon, "Hermon %p out of completion queues\n",
		       hermon );
		rc = cqn_offset;
		goto err_cqn_offset;
	}
	cq->cqn = ( hermon->cap.reserved_cqs + cqn_offset );

	/* Allocate control structures */
	hermon_cq = zalloc ( sizeof ( *hermon_cq ) );
	if ( ! hermon_cq ) {
		rc = -ENOMEM;
		goto err_hermon_cq;
	}

	/* Allocate completion queue itself */
	hermon_cq->cqe_size = ( cq->num_cqes * sizeof ( hermon_cq->cqe[0] ) );
	hermon_cq->cqe = malloc_dma ( hermon_cq->cqe_size,
				      sizeof ( hermon_cq->cqe[0] ) );
	if ( ! hermon_cq->cqe ) {
		rc = -ENOMEM;
		goto err_cqe;
	}
	memset ( hermon_cq->cqe, 0, hermon_cq->cqe_size );
	for ( i = 0 ; i < cq->num_cqes ; i++ ) {
		MLX_FILL_1 ( &hermon_cq->cqe[i].normal, 7, owner, 1 );
	}
	barrier();

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_cq->cqe,
				       hermon_cq->cqe_size,
				       &hermon_cq->mtt ) ) != 0 )
		goto err_mtt;

	/* Hand queue over to hardware */
	memset ( &cqctx, 0, sizeof ( cqctx ) );
	MLX_FILL_1 ( &cqctx, 0, st, 0xa /* "Event fired" */ );
	MLX_FILL_1 ( &cqctx, 2,
		     page_offset, ( hermon_cq->mtt.page_offset >> 5 ) );
	MLX_FILL_2 ( &cqctx, 3,
		     usr_page, HERMON_UAR_PAGE,
		     log_cq_size, fls ( cq->num_cqes - 1 ) );
	MLX_FILL_1 ( &cqctx, 7, mtt_base_addr_l,
		     ( hermon_cq->mtt.mtt_base_addr >> 3 ) );
	MLX_FILL_1 ( &cqctx, 15, db_record_addr_l,
		     ( virt_to_phys ( &hermon_cq->doorbell ) >> 3 ) );
	if ( ( rc = hermon_cmd_sw2hw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p SW2HW_CQ failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_sw2hw_cq;
	}

	DBGC ( hermon, "Hermon %p CQN %#lx ring at [%p,%p)\n",
	       hermon, cq->cqn, hermon_cq->cqe,
	       ( ( ( void * ) hermon_cq->cqe ) + hermon_cq->cqe_size ) );
	ib_cq_set_drvdata ( cq, hermon_cq );
	return 0;

 err_sw2hw_cq:
	hermon_free_mtt ( hermon, &hermon_cq->mtt );
 err_mtt:
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
 err_cqe:
	free ( hermon_cq );
 err_hermon_cq:
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );
 err_cqn_offset:
	return rc;
}
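/* Note ( illustrative ): log_cq_size is fls ( num_cqes - 1 ), so a ring
 * of 8 CQEs is described to the hardware as 2^3 entries; pre-setting every
 * CQE's owner bit to 1 hands the entire ring to the hardware before
 * SW2HW_CQ transfers ownership of the queue itself.
 */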
/**
 * Destroy completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 */
static void hermon_destroy_cq ( struct ib_device *ibdev,
				struct ib_completion_queue *cq ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
	struct hermonprm_completion_queue_context cqctx;
	int cqn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_hw2sw_cq ( hermon, cq->cqn, &cqctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL HW2SW_CQ failed on CQN %#lx: "
		       "%s\n", hermon, cq->cqn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_cq->mtt );

	/* Free memory */
	free_dma ( hermon_cq->cqe, hermon_cq->cqe_size );
	free ( hermon_cq );

	/* Mark queue number as free */
	cqn_offset = ( cq->cqn - hermon->cap.reserved_cqs );
	hermon_bitmask_free ( hermon->cq_inuse, cqn_offset, 1 );

	ib_cq_set_drvdata ( cq, NULL );
}
/***************************************************************************
 *
 * Queue pair operations
 *
 ***************************************************************************
 */
/**
 * Create queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @ret rc		Return status code
 */
static int hermon_create_qp ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp;
	struct hermonprm_qp_ee_state_transitions qpctx;
	int qpn_offset;
	int rc;

	/* Find a free queue pair number */
	qpn_offset = hermon_bitmask_alloc ( hermon->qp_inuse,
					    HERMON_MAX_QPS, 1 );
	if ( qpn_offset < 0 ) {
		DBGC ( hermon, "Hermon %p out of queue pairs\n", hermon );
		rc = qpn_offset;
		goto err_qpn_offset;
	}
	qp->qpn = ( HERMON_QPN_BASE + hermon->cap.reserved_qps +
		    qpn_offset );

	/* Allocate control structures */
	hermon_qp = zalloc ( sizeof ( *hermon_qp ) );
	if ( ! hermon_qp ) {
		rc = -ENOMEM;
		goto err_hermon_qp;
	}

	/* Allocate work queue buffer */
	hermon_qp->send.num_wqes = ( qp->send.num_wqes /* headroom */ + 1 +
				( 2048 / sizeof ( hermon_qp->send.wqe[0] ) ) );
	hermon_qp->send.num_wqes =
		( 1 << fls ( hermon_qp->send.num_wqes - 1 ) ); /* round up */
	hermon_qp->send.wqe_size = ( hermon_qp->send.num_wqes *
				     sizeof ( hermon_qp->send.wqe[0] ) );
	hermon_qp->recv.wqe_size = ( qp->recv.num_wqes *
				     sizeof ( hermon_qp->recv.wqe[0] ) );
	hermon_qp->wqe_size = ( hermon_qp->send.wqe_size +
				hermon_qp->recv.wqe_size );
	hermon_qp->wqe = malloc_dma ( hermon_qp->wqe_size,
				      sizeof ( hermon_qp->send.wqe[0] ) );
	if ( ! hermon_qp->wqe ) {
		rc = -ENOMEM;
		goto err_alloc_wqe;
	}
	hermon_qp->send.wqe = hermon_qp->wqe;
	memset ( hermon_qp->send.wqe, 0xff, hermon_qp->send.wqe_size );
	hermon_qp->recv.wqe = ( hermon_qp->wqe + hermon_qp->send.wqe_size );
	memset ( hermon_qp->recv.wqe, 0, hermon_qp->recv.wqe_size );

	/* Allocate MTT entries */
	if ( ( rc = hermon_alloc_mtt ( hermon, hermon_qp->wqe,
				       hermon_qp->wqe_size,
				       &hermon_qp->mtt ) ) != 0 ) {
		goto err_alloc_mtt;
	}

	/* Transition queue to INIT state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_2 ( &qpctx, 2,
		     qpc_eec_data.pm_state, 0x03 /* Always 0x03 for UD */,
		     qpc_eec_data.st, HERMON_ST_UD );
	MLX_FILL_1 ( &qpctx, 3, qpc_eec_data.pd, HERMON_GLOBAL_PD );
	MLX_FILL_4 ( &qpctx, 4,
		     qpc_eec_data.log_rq_size, fls ( qp->recv.num_wqes - 1 ),
		     qpc_eec_data.log_rq_stride,
		     ( fls ( sizeof ( hermon_qp->recv.wqe[0] ) - 1 ) - 4 ),
		     qpc_eec_data.log_sq_size,
		     fls ( hermon_qp->send.num_wqes - 1 ),
		     qpc_eec_data.log_sq_stride,
		     ( fls ( sizeof ( hermon_qp->send.wqe[0] ) - 1 ) - 4 ) );
	MLX_FILL_1 ( &qpctx, 5,
		     qpc_eec_data.usr_page, HERMON_UAR_PAGE );
	MLX_FILL_1 ( &qpctx, 33, qpc_eec_data.cqn_snd, qp->send.cq->cqn );
	MLX_FILL_1 ( &qpctx, 38, qpc_eec_data.page_offset,
		     ( hermon_qp->mtt.page_offset >> 6 ) );
	MLX_FILL_1 ( &qpctx, 41, qpc_eec_data.cqn_rcv, qp->recv.cq->cqn );
	MLX_FILL_1 ( &qpctx, 43, qpc_eec_data.db_record_addr_l,
		     ( virt_to_phys ( &hermon_qp->recv.doorbell ) >> 2 ) );
	MLX_FILL_1 ( &qpctx, 44, qpc_eec_data.q_key, qp->qkey );
	MLX_FILL_1 ( &qpctx, 53, qpc_eec_data.mtt_base_addr_l,
		     ( hermon_qp->mtt.mtt_base_addr >> 3 ) );
	if ( ( rc = hermon_cmd_rst2init_qp ( hermon, qp->qpn,
					     &qpctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p RST2INIT_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_rst2init_qp;
	}

	/* Transition queue to RTR state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	MLX_FILL_2 ( &qpctx, 4,
		     qpc_eec_data.mtu, HERMON_MTU_2048,
		     qpc_eec_data.msg_max, 11 /* 2^11 = 2048 */ );
	MLX_FILL_1 ( &qpctx, 16,
		     qpc_eec_data.primary_address_path.sched_queue,
		     ( 0x83 /* default policy */ |
		       ( ( ibdev->port - 1 ) << 6 ) ) );
	if ( ( rc = hermon_cmd_init2rtr_qp ( hermon, qp->qpn,
					     &qpctx ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p INIT2RTR_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_init2rtr_qp;
	}

	/* Transition queue to RTS state */
	memset ( &qpctx, 0, sizeof ( qpctx ) );
	if ( ( rc = hermon_cmd_rtr2rts_qp ( hermon, qp->qpn, &qpctx ) ) != 0 ){
		DBGC ( hermon, "Hermon %p RTR2RTS_QP failed: %s\n",
		       hermon, strerror ( rc ) );
		goto err_rtr2rts_qp;
	}

	DBGC ( hermon, "Hermon %p QPN %#lx send ring at [%p,%p)\n",
	       hermon, qp->qpn, hermon_qp->send.wqe,
	       ( ((void *)hermon_qp->send.wqe ) + hermon_qp->send.wqe_size ) );
	DBGC ( hermon, "Hermon %p QPN %#lx receive ring at [%p,%p)\n",
	       hermon, qp->qpn, hermon_qp->recv.wqe,
	       ( ((void *)hermon_qp->recv.wqe ) + hermon_qp->recv.wqe_size ) );
	ib_qp_set_drvdata ( qp, hermon_qp );
	return 0;

 err_rtr2rts_qp:
 err_init2rtr_qp:
	hermon_cmd_2rst_qp ( hermon, qp->qpn );
 err_rst2init_qp:
	hermon_free_mtt ( hermon, &hermon_qp->mtt );
 err_alloc_mtt:
	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
 err_alloc_wqe:
	free ( hermon_qp );
 err_hermon_qp:
	hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );
 err_qpn_offset:
	return rc;
}
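/* Worked example ( illustrative, assuming a 64-byte send WQE for the sake
 * of arithmetic ): with qp->send.num_wqes = 3 the ring is padded to
 * 3 + 1 + ( 2048 / 64 ) = 36 entries and rounded up to 2^fls ( 35 ) = 64;
 * log_sq_stride is then fls ( 63 ) - 4 = 2, i.e. a stride of
 * 2^(4+2) = 64 bytes, matching the hardware's log2-minus-4 encoding.
 */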
/**
 * Destroy queue pair
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 */
static void hermon_destroy_qp ( struct ib_device *ibdev,
				struct ib_queue_pair *qp ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	int qpn_offset;
	int rc;

	/* Take ownership back from hardware */
	if ( ( rc = hermon_cmd_2rst_qp ( hermon, qp->qpn ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL 2RST_QP failed on QPN %#lx: "
		       "%s\n", hermon, qp->qpn, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}

	/* Free MTT entries */
	hermon_free_mtt ( hermon, &hermon_qp->mtt );

	/* Free memory */
	free_dma ( hermon_qp->wqe, hermon_qp->wqe_size );
	free ( hermon_qp );

	/* Mark queue number as free */
	qpn_offset = ( qp->qpn - HERMON_QPN_BASE -
		       hermon->cap.reserved_qps );
	hermon_bitmask_free ( hermon->qp_inuse, qpn_offset, 1 );

	ib_qp_set_drvdata ( qp, NULL );
}
/***************************************************************************
 *
 * Work request operations
 *
 ***************************************************************************
 */
/** GID used for GID-less send work queue entries */
static const struct ib_gid hermon_no_gid = {
	{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }
};
/**
 * Post send work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v av		Address vector
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int hermon_post_send ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct ib_address_vector *av,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->send;
	struct hermon_send_work_queue *hermon_send_wq = &hermon_qp->send;
	struct hermonprm_ud_send_wqe *wqe;
	const struct ib_gid *gid;
	union hermonprm_doorbell_register db_reg;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( hermon, "Hermon %p send queue full", hermon );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &hermon_send_wq->wqe[ wq->next_idx &
				    ( hermon_send_wq->num_wqes - 1 ) ].ud;

	/* Construct work queue entry */
	memset ( ( ( ( void * ) wqe ) + 4 /* avoid ctrl.owner */ ), 0,
		 ( sizeof ( *wqe ) - 4 ) );
	MLX_FILL_1 ( &wqe->ctrl, 1, ds, ( sizeof ( *wqe ) / 16 ) );
	MLX_FILL_1 ( &wqe->ctrl, 2, c, 0x03 /* generate completion */ );
	MLX_FILL_2 ( &wqe->ud, 0,
		     ud_address_vector.pd, HERMON_GLOBAL_PD,
		     ud_address_vector.port_number, ibdev->port );
	MLX_FILL_2 ( &wqe->ud, 1,
		     ud_address_vector.rlid, av->dlid,
		     ud_address_vector.g, av->gid_present );
	MLX_FILL_1 ( &wqe->ud, 2,
		     ud_address_vector.max_stat_rate,
		     ( ( ( av->rate < 2 ) || ( av->rate > 10 ) ) ?
		       8 : ( av->rate + 5 ) ) );
	MLX_FILL_1 ( &wqe->ud, 3, ud_address_vector.sl, av->sl );
	gid = ( av->gid_present ? &av->gid : &hermon_no_gid );
	memcpy ( &wqe->ud.u.dwords[4], gid, sizeof ( *gid ) );
	MLX_FILL_1 ( &wqe->ud, 8, destination_qp, av->dest_qp );
	MLX_FILL_1 ( &wqe->ud, 9, q_key, av->qkey );
	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_len ( iobuf ) );
	MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->reserved_lkey );
	MLX_FILL_1 ( &wqe->data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );
	barrier();
	MLX_FILL_2 ( &wqe->ctrl, 0,
		     opcode, HERMON_OPCODE_SEND,
		     owner,
		     ( ( wq->next_idx & hermon_send_wq->num_wqes ) ? 1 : 0 ) );
	DBGCP ( hermon, "Hermon %p posting send WQE:\n", hermon );
	DBGCP_HD ( hermon, wqe, sizeof ( *wqe ) );
	barrier();

	/* Ring doorbell register */
	MLX_FILL_1 ( &db_reg.send, 0, qn, qp->qpn );
	DBGCP ( hermon, "Ringing doorbell %08lx with %08lx\n",
		virt_to_phys ( hermon->uar + HERMON_DB_POST_SND_OFFSET ),
		db_reg.dword[0] );
	writel ( db_reg.dword[0], ( hermon->uar + HERMON_DB_POST_SND_OFFSET ));

	/* Update work queue's index */
	wq->next_idx++;

	return 0;
}
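/* Note ( illustrative ): the send WQE's owner bit alternates each time
 * next_idx wraps the power-of-two ring, letting the hardware distinguish
 * freshly posted WQEs from stale ones; the barrier()s order the WQE body
 * before the ownership/opcode dword, and that dword before the doorbell.
 */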
/**
 * Post receive work queue entry
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int hermon_post_recv ( struct ib_device *ibdev,
			      struct ib_queue_pair *qp,
			      struct io_buffer *iobuf ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_queue_pair *hermon_qp = ib_qp_get_drvdata ( qp );
	struct ib_work_queue *wq = &qp->recv;
	struct hermon_recv_work_queue *hermon_recv_wq = &hermon_qp->recv;
	struct hermonprm_recv_wqe *wqe;
	unsigned int wqe_idx_mask;

	/* Allocate work queue entry */
	wqe_idx_mask = ( wq->num_wqes - 1 );
	if ( wq->iobufs[wq->next_idx & wqe_idx_mask] ) {
		DBGC ( hermon, "Hermon %p receive queue full", hermon );
		return -ENOBUFS;
	}
	wq->iobufs[wq->next_idx & wqe_idx_mask] = iobuf;
	wqe = &hermon_recv_wq->wqe[wq->next_idx & wqe_idx_mask].recv;

	/* Construct work queue entry */
	MLX_FILL_1 ( &wqe->data[0], 0, byte_count, iob_tailroom ( iobuf ) );
	MLX_FILL_1 ( &wqe->data[0], 1, l_key, hermon->reserved_lkey );
	MLX_FILL_1 ( &wqe->data[0], 3,
		     local_address_l, virt_to_bus ( iobuf->data ) );

	/* Update work queue's index */
	wq->next_idx++;

	/* Update doorbell record */
	barrier();
	MLX_FILL_1 ( &hermon_recv_wq->doorbell, 0, receive_wqe_counter,
		     ( wq->next_idx & 0xffff ) );

	return 0;
}
/**
 * Handle completion
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v cqe		Hardware completion queue entry
 * @v complete_send	Send completion handler
 * @v complete_recv	Receive completion handler
 * @ret rc		Return status code
 */
static int hermon_complete ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq,
			     union hermonprm_completion_entry *cqe,
			     ib_completer_t complete_send,
			     ib_completer_t complete_recv ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct ib_completion completion;
	struct ib_work_queue *wq;
	struct ib_queue_pair *qp;
	struct hermon_queue_pair *hermon_qp;
	struct io_buffer *iobuf;
	ib_completer_t complete;
	unsigned int opcode;
	unsigned long qpn;
	int is_send;
	unsigned int wqe_idx;
	int rc = 0;

	/* Parse completion */
	memset ( &completion, 0, sizeof ( completion ) );
	qpn = MLX_GET ( &cqe->normal, qpn );
	is_send = MLX_GET ( &cqe->normal, s_r );
	opcode = MLX_GET ( &cqe->normal, opcode );
	if ( opcode >= HERMON_OPCODE_RECV_ERROR ) {
		/* "s" field is not valid for error opcodes */
		is_send = ( opcode == HERMON_OPCODE_SEND_ERROR );
		completion.syndrome = MLX_GET ( &cqe->error, syndrome );
		DBGC ( hermon, "Hermon %p CQN %lx syndrome %x vendor %lx\n",
		       hermon, cq->cqn, completion.syndrome,
		       MLX_GET ( &cqe->error, vendor_error_syndrome ) );
		rc = -EIO;
		/* Don't return immediately; propagate error to completer */
	}

	/* Identify work queue */
	wq = ib_find_wq ( cq, qpn, is_send );
	if ( ! wq ) {
		DBGC ( hermon, "Hermon %p CQN %lx unknown %s QPN %lx\n",
		       hermon, cq->cqn, ( is_send ? "send" : "recv" ), qpn );
		return -EIO;
	}
	qp = wq->qp;
	hermon_qp = ib_qp_get_drvdata ( qp );

	/* Identify I/O buffer */
	wqe_idx = ( MLX_GET ( &cqe->normal, wqe_counter ) &
		    ( wq->num_wqes - 1 ) );
	iobuf = wq->iobufs[wqe_idx];
	if ( ! iobuf ) {
		DBGC ( hermon, "Hermon %p CQN %lx QPN %lx empty WQE %x\n",
		       hermon, cq->cqn, qpn, wqe_idx );
		return -EIO;
	}
	wq->iobufs[wqe_idx] = NULL;

	/* Fill in length for received packets */
	if ( ! is_send ) {
		completion.len = MLX_GET ( &cqe->normal, byte_cnt );
		if ( completion.len > iob_tailroom ( iobuf ) ) {
			DBGC ( hermon, "Hermon %p CQN %lx QPN %lx IDX %x "
			       "overlength received packet length %zd\n",
			       hermon, cq->cqn, qpn, wqe_idx, completion.len );
			return -EIO;
		}
	}

	/* Pass off to caller's completion handler */
	complete = ( is_send ? complete_send : complete_recv );
	complete ( ibdev, qp, &completion, iobuf );

	return rc;
}
/**
 * Poll completion queue
 *
 * @v ibdev		Infiniband device
 * @v cq		Completion queue
 * @v complete_send	Send completion handler
 * @v complete_recv	Receive completion handler
 */
static void hermon_poll_cq ( struct ib_device *ibdev,
			     struct ib_completion_queue *cq,
			     ib_completer_t complete_send,
			     ib_completer_t complete_recv ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermon_completion_queue *hermon_cq = ib_cq_get_drvdata ( cq );
	union hermonprm_completion_entry *cqe;
	unsigned int cqe_idx_mask;
	int rc;

	while ( 1 ) {
		/* Look for completion entry */
		cqe_idx_mask = ( cq->num_cqes - 1 );
		cqe = &hermon_cq->cqe[cq->next_idx & cqe_idx_mask];
		if ( MLX_GET ( &cqe->normal, owner ) ^
		     ( ( cq->next_idx & cq->num_cqes ) ? 1 : 0 ) ) {
			/* Entry still owned by hardware; end of poll */
			break;
		}
		DBGCP ( hermon, "Hermon %p completion:\n", hermon );
		DBGCP_HD ( hermon, cqe, sizeof ( *cqe ) );

		/* Handle completion */
		if ( ( rc = hermon_complete ( ibdev, cq, cqe, complete_send,
					      complete_recv ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p failed to complete: %s\n",
			       hermon, strerror ( rc ) );
			DBGC_HD ( hermon, cqe, sizeof ( *cqe ) );
		}

		/* Update completion queue's index */
		cq->next_idx++;

		/* Update doorbell record */
		MLX_FILL_1 ( &hermon_cq->doorbell, 0, update_ci,
			     ( cq->next_idx & 0xffffffUL ) );
	}
}
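/* Note ( illustrative ): the expected owner value flips each time next_idx
 * crosses a multiple of num_cqes; e.g. with an 8-entry ring, indices 0-7
 * expect owner 0 and indices 8-15 expect owner 1, so a stale CQE from the
 * previous lap is never consumed twice.
 */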
/***************************************************************************
 *
 * Infiniband link-layer operations
 *
 ***************************************************************************
 */
/**
 * Initialise Infiniband link
 *
 * @v ibdev		Infiniband device
 * @ret rc		Return status code
 */
static int hermon_open ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_init_port init_port;
	int rc;

	memset ( &init_port, 0, sizeof ( init_port ) );
	MLX_FILL_2 ( &init_port, 0,
		     port_width_cap, 3,
		     vl_cap, 1 );
	MLX_FILL_2 ( &init_port, 1,
		     mtu, HERMON_MTU_2048,
		     max_gid, 1 );
	MLX_FILL_1 ( &init_port, 2, max_pkey, 64 );
	if ( ( rc = hermon_cmd_init_port ( hermon, ibdev->port,
					   &init_port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not initialise port: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
 * Close Infiniband link
 *
 * @v ibdev		Infiniband device
 */
static void hermon_close ( struct ib_device *ibdev ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	int rc;

	if ( ( rc = hermon_cmd_close_port ( hermon, ibdev->port ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not close port: %s\n",
		       hermon, strerror ( rc ) );
		/* Nothing we can do about this */
	}
}
/***************************************************************************
 *
 * Multicast group operations
 *
 ***************************************************************************
 */
/**
 * Attach to multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 * @ret rc		Return status code
 */
static int hermon_mcast_attach ( struct ib_device *ibdev,
				 struct ib_queue_pair *qp,
				 struct ib_gid *gid ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_mgm_hash hash;
	struct hermonprm_mcg_entry mcg;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}
	index = MLX_GET ( &hash, hash );

	/* Check for existing hash table entry */
	if ( ( rc = hermon_cmd_read_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not read MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return rc;
	}
	if ( MLX_GET ( &mcg, hdr.members_count ) != 0 ) {
		/* FIXME: this implementation allows only a single QP
		 * per multicast group, and doesn't handle hash
		 * collisions.  Sufficient for IPoIB but may need to
		 * be extended in future.
		 */
		DBGC ( hermon, "Hermon %p MGID index %#x already in use\n",
		       hermon, index );
		return -EBUSY;
	}

	/* Update hash table entry */
	MLX_FILL_1 ( &mcg, 1, hdr.members_count, 1 );
	MLX_FILL_1 ( &mcg, 8, qp[0].qpn, qp->qpn );
	memcpy ( &mcg.u.dwords[4], gid, sizeof ( *gid ) );
	if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return rc;
	}

	return 0;
}
/**
 * Detach from multicast group
 *
 * @v ibdev		Infiniband device
 * @v qp		Queue pair
 * @v gid		Multicast GID
 */
static void hermon_mcast_detach ( struct ib_device *ibdev,
				  struct ib_queue_pair *qp __unused,
				  struct ib_gid *gid ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	struct hermonprm_mgm_hash hash;
	struct hermonprm_mcg_entry mcg;
	unsigned int index;
	int rc;

	/* Generate hash table index */
	if ( ( rc = hermon_cmd_mgid_hash ( hermon, gid, &hash ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not hash GID: %s\n",
		       hermon, strerror ( rc ) );
		return;
	}
	index = MLX_GET ( &hash, hash );

	/* Clear hash table entry */
	memset ( &mcg, 0, sizeof ( mcg ) );
	if ( ( rc = hermon_cmd_write_mcg ( hermon, index, &mcg ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not write MCG %#x: %s\n",
		       hermon, index, strerror ( rc ) );
		return;
	}
}
/***************************************************************************
 *
 * MAD operations
 *
 ***************************************************************************
 */
/**
 * Issue management datagram
 *
 * @v ibdev		Infiniband device
 * @v mad		Management datagram
 * @v len		Length of management datagram
 * @ret rc		Return status code
 */
static int hermon_mad ( struct ib_device *ibdev, struct ib_mad_hdr *mad,
			size_t len ) {
	struct hermon *hermon = ib_get_drvdata ( ibdev );
	union hermonprm_mad mad_ifc;
	int rc;

	/* Copy in request packet */
	memset ( &mad_ifc, 0, sizeof ( mad_ifc ) );
	assert ( len <= sizeof ( mad_ifc.mad ) );
	memcpy ( &mad_ifc.mad, mad, len );

	/* Issue MAD */
	if ( ( rc = hermon_cmd_mad_ifc ( hermon, ibdev->port,
					 &mad_ifc ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not issue MAD IFC: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	/* Copy out reply packet */
	memcpy ( mad, &mad_ifc.mad, len );

	if ( mad->status != 0 ) {
		DBGC ( hermon, "Hermon %p MAD IFC status %04x\n",
		       hermon, ntohs ( mad->status ) );
		return -EIO;
	}
	return 0;
}
/** Hermon Infiniband operations */
static struct ib_device_operations hermon_ib_operations = {
	.create_cq = hermon_create_cq,
	.destroy_cq = hermon_destroy_cq,
	.create_qp = hermon_create_qp,
	.destroy_qp = hermon_destroy_qp,
	.post_send = hermon_post_send,
	.post_recv = hermon_post_recv,
	.poll_cq = hermon_poll_cq,
	.open = hermon_open,
	.close = hermon_close,
	.mcast_attach = hermon_mcast_attach,
	.mcast_detach = hermon_mcast_detach,
	.mad = hermon_mad,
};
/***************************************************************************
 *
 * Firmware control
 *
 ***************************************************************************
 */
/**
 * Start firmware running
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_start_firmware ( struct hermon *hermon ) {
	struct hermonprm_query_fw fw;
	struct hermonprm_virtual_physical_mapping map_fa;
	unsigned int fw_pages;
	unsigned int log2_fw_pages;
	size_t fw_size;
	physaddr_t fw_base;
	int rc;

	/* Get firmware parameters */
	if ( ( rc = hermon_cmd_query_fw ( hermon, &fw ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not query firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_query_fw;
	}
	DBGC ( hermon, "Hermon %p firmware version %ld.%ld.%ld\n", hermon,
	       MLX_GET ( &fw, fw_rev_major ), MLX_GET ( &fw, fw_rev_minor ),
	       MLX_GET ( &fw, fw_rev_subminor ) );
	fw_pages = MLX_GET ( &fw, fw_pages );
	log2_fw_pages = fls ( fw_pages - 1 );
	fw_pages = ( 1 << log2_fw_pages );
	DBGC ( hermon, "Hermon %p requires %d kB for firmware\n",
	       hermon, ( fw_pages * 4 ) );

	/* Allocate firmware pages and map firmware area */
	fw_size = ( fw_pages * HERMON_PAGE_SIZE );
	hermon->firmware_area = umalloc ( fw_size );
	if ( ! hermon->firmware_area ) {
		rc = -ENOMEM;
		goto err_alloc_fa;
	}
	fw_base = ( user_to_phys ( hermon->firmware_area, fw_size ) &
		    ~( fw_size - 1 ) );
	DBGC ( hermon, "Hermon %p firmware area at physical [%lx,%lx)\n",
	       hermon, fw_base, ( fw_base + fw_size ) );
	memset ( &map_fa, 0, sizeof ( map_fa ) );
	MLX_FILL_2 ( &map_fa, 3,
		     log2size, log2_fw_pages,
		     pa_l, ( fw_base >> 12 ) );
	if ( ( rc = hermon_cmd_map_fa ( hermon, &map_fa ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not map firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_fa;
	}

	/* Start firmware */
	if ( ( rc = hermon_cmd_run_fw ( hermon ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not run firmware: %s\n",
		       hermon, strerror ( rc ) );
		goto err_run_fw;
	}

	DBGC ( hermon, "Hermon %p firmware started\n", hermon );
	return 0;

 err_run_fw:
	hermon_cmd_unmap_fa ( hermon );
 err_map_fa:
	ufree ( hermon->firmware_area );
	hermon->firmware_area = UNULL;
 err_alloc_fa:
 err_query_fw:
	return rc;
}
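/* Example ( illustrative ): if QUERY_FW reports fw_pages = 11, the driver
 * rounds up to 2^fls ( 10 ) = 16 pages ( 64 kB ), both because MAP_FA
 * takes a log2 page count and so that fw_base can be aligned down to the
 * area's own size.
 */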
/**
 * Stop firmware running
 *
 * @v hermon		Hermon device
 */
static void hermon_stop_firmware ( struct hermon *hermon ) {
	int rc;

	if ( ( rc = hermon_cmd_unmap_fa ( hermon ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p FATAL could not stop firmware: %s\n",
		       hermon, strerror ( rc ) );
		/* Leak memory and return; at least we avoid corruption */
		return;
	}
	ufree ( hermon->firmware_area );
	hermon->firmware_area = UNULL;
}
/***************************************************************************
 *
 * Infinihost Context Memory management
 *
 ***************************************************************************
 */
/**
 * Get device limits
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_get_cap ( struct hermon *hermon ) {
	struct hermonprm_query_dev_cap dev_cap;
	int rc;

	if ( ( rc = hermon_cmd_query_dev_cap ( hermon, &dev_cap ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not get device limits: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	hermon->cap.cmpt_entry_size = MLX_GET ( &dev_cap, c_mpt_entry_sz );
	hermon->cap.reserved_qps =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_qps ) );
	hermon->cap.qpc_entry_size = MLX_GET ( &dev_cap, qpc_entry_sz );
	hermon->cap.altc_entry_size = MLX_GET ( &dev_cap, altc_entry_sz );
	hermon->cap.auxc_entry_size = MLX_GET ( &dev_cap, aux_entry_sz );
	hermon->cap.reserved_srqs =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_srqs ) );
	hermon->cap.srqc_entry_size = MLX_GET ( &dev_cap, srq_entry_sz );
	hermon->cap.reserved_cqs =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_cqs ) );
	hermon->cap.cqc_entry_size = MLX_GET ( &dev_cap, cqc_entry_sz );
	hermon->cap.reserved_eqs = MLX_GET ( &dev_cap, num_rsvd_eqs );
	hermon->cap.eqc_entry_size = MLX_GET ( &dev_cap, eqc_entry_sz );
	hermon->cap.reserved_mtts =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mtts ) );
	hermon->cap.mtt_entry_size = MLX_GET ( &dev_cap, mtt_entry_sz );
	hermon->cap.reserved_mrws =
		( 1 << MLX_GET ( &dev_cap, log2_rsvd_mrws ) );
	hermon->cap.dmpt_entry_size = MLX_GET ( &dev_cap, d_mpt_entry_sz );
	hermon->cap.reserved_uars = MLX_GET ( &dev_cap, num_rsvd_uars );

	return 0;
}
/**
 * Calculate ICM usage
 *
 * @v log_num_entries	Log2 of the number of entries
 * @v entry_size	Entry size
 * @ret usage		Usage size in ICM
 */
static size_t icm_usage ( unsigned int log_num_entries, size_t entry_size ) {
	size_t usage;

	usage = ( ( 1 << log_num_entries ) * entry_size );
	usage = ( ( usage + HERMON_PAGE_SIZE - 1 ) &
		  ~( HERMON_PAGE_SIZE - 1 ) );
	return usage;
}
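/* Example ( illustrative ): log_num_entries = 4 with entry_size = 100
 * gives 16 * 100 = 1600 bytes, which is then rounded up to one full
 * 4096-byte ICM page.
 */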
/**
 * Allocate ICM
 *
 * @v hermon		Hermon device
 * @v init_hca		INIT_HCA structure to fill in
 * @ret rc		Return status code
 */
static int hermon_alloc_icm ( struct hermon *hermon,
			      struct hermonprm_init_hca *init_hca ) {
	struct hermonprm_scalar_parameter icm_size;
	struct hermonprm_scalar_parameter icm_aux_size;
	struct hermonprm_virtual_physical_mapping map_icm_aux;
	struct hermonprm_virtual_physical_mapping map_icm;
	uint64_t icm_offset = 0;
	unsigned int log_num_qps, log_num_srqs, log_num_cqs, log_num_eqs;
	unsigned int log_num_mtts, log_num_mpts;
	size_t cmpt_max_len;
	size_t qp_cmpt_len, srq_cmpt_len, cq_cmpt_len, eq_cmpt_len;
	size_t icm_len, icm_aux_len;
	physaddr_t icm_phys;
	int i;
	int rc;

	/*
	 * Start by carving up the ICM virtual address space
	 */

	/* Calculate number of each object type within ICM */
	log_num_qps = fls ( hermon->cap.reserved_qps + HERMON_MAX_QPS - 1 );
	log_num_srqs = fls ( hermon->cap.reserved_srqs - 1 );
	log_num_cqs = fls ( hermon->cap.reserved_cqs + HERMON_MAX_CQS - 1 );
	log_num_eqs = fls ( hermon->cap.reserved_eqs + HERMON_MAX_EQS - 1 );
	log_num_mtts = fls ( hermon->cap.reserved_mtts + HERMON_MAX_MTTS - 1 );

	/* ICM starts with the cMPT tables, which are sparse */
	cmpt_max_len = ( HERMON_CMPT_MAX_ENTRIES *
			 ( ( uint64_t ) hermon->cap.cmpt_entry_size ) );
	qp_cmpt_len = icm_usage ( log_num_qps, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_QP_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_QP_CMPT].len = qp_cmpt_len;
	icm_offset += cmpt_max_len;
	srq_cmpt_len = icm_usage ( log_num_srqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_SRQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_SRQ_CMPT].len = srq_cmpt_len;
	icm_offset += cmpt_max_len;
	cq_cmpt_len = icm_usage ( log_num_cqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_CQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_CQ_CMPT].len = cq_cmpt_len;
	icm_offset += cmpt_max_len;
	eq_cmpt_len = icm_usage ( log_num_eqs, hermon->cap.cmpt_entry_size );
	hermon->icm_map[HERMON_ICM_EQ_CMPT].offset = icm_offset;
	hermon->icm_map[HERMON_ICM_EQ_CMPT].len = eq_cmpt_len;
	icm_offset += cmpt_max_len;

	hermon->icm_map[HERMON_ICM_OTHER].offset = icm_offset;

	/* Queue pair contexts */
	MLX_FILL_1 ( init_hca, 12,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 13,
		     qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp,
		     log_num_qps );
	DBGC ( hermon, "Hermon %p ICM QPC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_qps, hermon->cap.qpc_entry_size );

	/* Extended alternate path contexts */
	MLX_FILL_1 ( init_hca, 24,
		     qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 25,
		     qpc_eec_cqc_eqc_rdb_parameters.altc_base_addr_l,
		     icm_offset );
	DBGC ( hermon, "Hermon %p ICM ALTC base = %llx\n", hermon, icm_offset);
	icm_offset += icm_usage ( log_num_qps,
				  hermon->cap.altc_entry_size );

	/* Extended auxiliary contexts */
	MLX_FILL_1 ( init_hca, 28,
		     qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 29,
		     qpc_eec_cqc_eqc_rdb_parameters.auxc_base_addr_l,
		     icm_offset );
	DBGC ( hermon, "Hermon %p ICM AUXC base = %llx\n", hermon, icm_offset);
	icm_offset += icm_usage ( log_num_qps,
				  hermon->cap.auxc_entry_size );

	/* Shared receive queue contexts */
	MLX_FILL_1 ( init_hca, 18,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 19,
		     qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq,
		     log_num_srqs );
	DBGC ( hermon, "Hermon %p ICM SRQC base = %llx\n", hermon, icm_offset);
	icm_offset += icm_usage ( log_num_srqs,
				  hermon->cap.srqc_entry_size );

	/* Completion queue contexts */
	MLX_FILL_1 ( init_hca, 20,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 21,
		     qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq,
		     log_num_cqs );
	DBGC ( hermon, "Hermon %p ICM CQC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_cqs, hermon->cap.cqc_entry_size );

	/* Event queue contexts */
	MLX_FILL_1 ( init_hca, 32,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_2 ( init_hca, 33,
		     qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr_l,
		     ( icm_offset >> 5 ),
		     qpc_eec_cqc_eqc_rdb_parameters.log_num_of_eq,
		     log_num_eqs );
	DBGC ( hermon, "Hermon %p ICM EQC base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_eqs, hermon->cap.eqc_entry_size );

	/* Memory translation table */
	MLX_FILL_1 ( init_hca, 64,
		     tpt_parameters.mtt_base_addr_h, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 65,
		     tpt_parameters.mtt_base_addr_l, icm_offset );
	DBGC ( hermon, "Hermon %p ICM MTT base = %llx\n", hermon, icm_offset );
	icm_offset += icm_usage ( log_num_mtts,
				  hermon->cap.mtt_entry_size );

	/* Memory protection table */
	log_num_mpts = fls ( hermon->cap.reserved_mrws + 1 - 1 );
	MLX_FILL_1 ( init_hca, 60,
		     tpt_parameters.dmpt_base_adr_h, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 61,
		     tpt_parameters.dmpt_base_adr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 62,
		     tpt_parameters.log_dmpt_sz, log_num_mpts );
	DBGC ( hermon, "Hermon %p ICM DMPT base = %llx\n", hermon, icm_offset);
	icm_offset += icm_usage ( log_num_mpts,
				  hermon->cap.dmpt_entry_size );

	/* Multicast table */
	MLX_FILL_1 ( init_hca, 48,
		     multicast_parameters.mc_base_addr_h,
		     ( icm_offset >> 32 ) );
	MLX_FILL_1 ( init_hca, 49,
		     multicast_parameters.mc_base_addr_l, icm_offset );
	MLX_FILL_1 ( init_hca, 52,
		     multicast_parameters.log_mc_table_entry_sz,
		     fls ( sizeof ( struct hermonprm_mcg_entry ) - 1 ) );
	MLX_FILL_1 ( init_hca, 53,
		     multicast_parameters.log_mc_table_hash_sz, 3 );
	MLX_FILL_1 ( init_hca, 54,
		     multicast_parameters.log_mc_table_sz, 3 );
	DBGC ( hermon, "Hermon %p ICM MC base = %llx\n", hermon, icm_offset );
	icm_offset += ( ( 8 * sizeof ( struct hermonprm_mcg_entry ) +
			  HERMON_PAGE_SIZE - 1 ) & ~( HERMON_PAGE_SIZE - 1 ) );

	hermon->icm_map[HERMON_ICM_OTHER].len =
		( icm_offset - hermon->icm_map[HERMON_ICM_OTHER].offset );

	/*
	 * Allocate and map physical memory for (portions of) ICM
	 *
	 * Map is:
	 *   ICM AUX area (aligned to its own size)
	 *   cMPT areas
	 *   Other areas
	 */

	/* Calculate physical memory required for ICM */
	icm_len = 0;
	for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
		icm_len += hermon->icm_map[i].len;
	}

	/* Get ICM auxiliary area size */
	memset ( &icm_size, 0, sizeof ( icm_size ) );
	MLX_FILL_1 ( &icm_size, 0, value_hi, ( icm_offset >> 32 ) );
	MLX_FILL_1 ( &icm_size, 1, value, icm_offset );
	if ( ( rc = hermon_cmd_set_icm_size ( hermon, &icm_size,
					      &icm_aux_size ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not set ICM size: %s\n",
		       hermon, strerror ( rc ) );
		goto err_set_icm_size;
	}
	icm_aux_len = ( MLX_GET ( &icm_aux_size, value ) * HERMON_PAGE_SIZE );
	/* Must round up to nearest power of two :( */
	icm_aux_len = ( 1 << fls ( icm_aux_len - 1 ) );

	/* Allocate ICM data and auxiliary area */
	DBGC ( hermon, "Hermon %p requires %zd kB ICM and %zd kB AUX ICM\n",
	       hermon, ( icm_len / 1024 ), ( icm_aux_len / 1024 ) );
	hermon->icm = umalloc ( 2 * icm_aux_len + icm_len );
	if ( ! hermon->icm ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	icm_phys = user_to_phys ( hermon->icm, 0 );

	/* Map ICM auxiliary area */
	icm_phys = ( ( icm_phys + icm_aux_len - 1 ) & ~( icm_aux_len - 1 ) );
	memset ( &map_icm_aux, 0, sizeof ( map_icm_aux ) );
	MLX_FILL_2 ( &map_icm_aux, 3,
		     log2size, fls ( ( icm_aux_len / HERMON_PAGE_SIZE ) - 1 ),
		     pa_l, ( icm_phys >> 12 ) );
	DBGC ( hermon, "Hermon %p mapping ICM AUX (2^%d pages) => %08lx\n",
	       hermon, fls ( ( icm_aux_len / HERMON_PAGE_SIZE ) - 1 ),
	       icm_phys );
	if ( ( rc = hermon_cmd_map_icm_aux ( hermon, &map_icm_aux ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not map AUX ICM: %s\n",
		       hermon, strerror ( rc ) );
		goto err_map_icm_aux;
	}
	icm_phys += icm_aux_len;

	/* MAP_ICM areas */
	for ( i = 0 ; i < HERMON_ICM_NUM_REGIONS ; i++ ) {
		memset ( &map_icm, 0, sizeof ( map_icm ) );
		MLX_FILL_1 ( &map_icm, 0,
			     va_h, ( hermon->icm_map[i].offset >> 32 ) );
		MLX_FILL_1 ( &map_icm, 1,
			     va_l, ( hermon->icm_map[i].offset >> 12 ) );
		MLX_FILL_2 ( &map_icm, 3,
			     log2size,
			     fls ( ( hermon->icm_map[i].len /
				     HERMON_PAGE_SIZE ) - 1 ),
			     pa_l, ( icm_phys >> 12 ) );
		DBGC ( hermon, "Hermon %p mapping ICM %llx+%zx (2^%d pages) "
		       "=> %08lx\n", hermon, hermon->icm_map[i].offset,
		       hermon->icm_map[i].len,
		       fls ( ( hermon->icm_map[i].len /
			       HERMON_PAGE_SIZE ) - 1 ), icm_phys );
		if ( ( rc = hermon_cmd_map_icm ( hermon, &map_icm ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not map ICM: %s\n",
			       hermon, strerror ( rc ) );
			goto err_map_icm;
		}
		icm_phys += hermon->icm_map[i].len;
	}

	return 0;

 err_map_icm:
	assert ( i == 0 ); /* We don't handle partial failure at present */
	hermon_cmd_unmap_icm_aux ( hermon );
 err_map_icm_aux:
	ufree ( hermon->icm );
	hermon->icm = UNULL;
 err_alloc:
 err_set_icm_size:
	return rc;
}
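/* Example ( illustrative ): if SET_ICM_SIZE reports 100 AUX pages
 * ( 400 kB ), icm_aux_len is rounded up to 2^fls ( 409599 ) = 512 kB so
 * that the AUX area can be aligned to its own size within the
 * over-allocated umalloc() region.
 */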
/**
 * Free ICM
 *
 * @v hermon		Hermon device
 */
static void hermon_free_icm ( struct hermon *hermon ) {
	struct hermonprm_scalar_parameter unmap_icm;
	int i;

	for ( i = ( HERMON_ICM_NUM_REGIONS - 1 ) ; i >= 0 ; i-- ) {
		memset ( &unmap_icm, 0, sizeof ( unmap_icm ) );
		MLX_FILL_1 ( &unmap_icm, 0, value_hi,
			     ( hermon->icm_map[i].offset >> 32 ) );
		MLX_FILL_1 ( &unmap_icm, 1, value,
			     hermon->icm_map[i].offset );
		hermon_cmd_unmap_icm ( hermon,
				       ( 1 << fls ( ( hermon->icm_map[i].len /
						      HERMON_PAGE_SIZE ) - 1)),
				       &unmap_icm );
	}
	hermon_cmd_unmap_icm_aux ( hermon );
	ufree ( hermon->icm );
	hermon->icm = UNULL;
}
/***************************************************************************
 *
 * PCI interface
 *
 ***************************************************************************
 */
/**
 * Set up memory protection table
 *
 * @v hermon		Hermon device
 * @ret rc		Return status code
 */
static int hermon_setup_mpt ( struct hermon *hermon ) {
	struct hermonprm_mpt mpt;
	uint32_t key;
	int rc;

	/* Derive key */
	key = ( hermon->cap.reserved_mrws | HERMON_MKEY_PREFIX );
	hermon->reserved_lkey = ( ( key << 8 ) | ( key >> 24 ) );

	/* Initialise memory protection table */
	memset ( &mpt, 0, sizeof ( mpt ) );
	MLX_FILL_4 ( &mpt, 0,
		     r_w, 1,
		     pa, 1,
		     lr, 1,
		     lw, 1 );
	MLX_FILL_1 ( &mpt, 2, mem_key, key );
	MLX_FILL_1 ( &mpt, 3, pd, HERMON_GLOBAL_PD );
	MLX_FILL_1 ( &mpt, 10, len64, 1 );
	if ( ( rc = hermon_cmd_sw2hw_mpt ( hermon,
					   hermon->cap.reserved_mrws,
					   &mpt ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not set up MPT: %s\n",
		       hermon, strerror ( rc ) );
		return rc;
	}

	return 0;
}
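/* Worked example ( illustrative, assuming HERMON_MKEY_PREFIX is
 * 0x77000000 ): with reserved_mrws = 1, key = 0x77000001 and the
 * byte-rotated reserved_lkey = ( key << 8 ) | ( key >> 24 ) =
 * 0x00000177, matching the hardware's rotated memory-key layout.
 */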
/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @v id		PCI ID
 * @ret rc		Return status code
 */
static int hermon_probe ( struct pci_device *pci,
			  const struct pci_device_id *id __unused ) {
	struct hermon *hermon;
	struct ib_device *ibdev;
	struct hermonprm_init_hca init_hca;
	int i;
	int rc;

	/* Allocate Hermon device */
	hermon = zalloc ( sizeof ( *hermon ) );
	if ( ! hermon ) {
		rc = -ENOMEM;
		goto err_alloc_hermon;
	}
	pci_set_drvdata ( pci, hermon );

	/* Allocate Infiniband devices */
	for ( i = 0 ; i < HERMON_NUM_PORTS ; i++ ) {
		ibdev = alloc_ibdev ( 0 );
		if ( ! ibdev ) {
			rc = -ENOMEM;
			goto err_alloc_ibdev;
		}
		hermon->ibdev[i] = ibdev;
		ibdev->op = &hermon_ib_operations;
		ibdev->dev = &pci->dev;
		ibdev->port = ( HERMON_PORT_BASE + i );
		ib_set_drvdata ( ibdev, hermon );
	}

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Get PCI BARs */
	hermon->config = ioremap ( pci_bar_start ( pci, HERMON_PCI_CONFIG_BAR),
				   HERMON_PCI_CONFIG_BAR_SIZE );
	hermon->uar = ioremap ( ( pci_bar_start ( pci, HERMON_PCI_UAR_BAR ) +
				  HERMON_UAR_PAGE * HERMON_PAGE_SIZE ),
				HERMON_PAGE_SIZE );

	/* Allocate space for mailboxes */
	hermon->mailbox_in = malloc_dma ( HERMON_MBOX_SIZE,
					  HERMON_MBOX_ALIGN );
	if ( ! hermon->mailbox_in ) {
		rc = -ENOMEM;
		goto err_mailbox_in;
	}
	hermon->mailbox_out = malloc_dma ( HERMON_MBOX_SIZE,
					   HERMON_MBOX_ALIGN );
	if ( ! hermon->mailbox_out ) {
		rc = -ENOMEM;
		goto err_mailbox_out;
	}

	/* Start firmware */
	if ( ( rc = hermon_start_firmware ( hermon ) ) != 0 )
		goto err_start_firmware;

	/* Get device limits */
	if ( ( rc = hermon_get_cap ( hermon ) ) != 0 )
		goto err_get_cap;

	/* Allocate ICM */
	memset ( &init_hca, 0, sizeof ( init_hca ) );
	if ( ( rc = hermon_alloc_icm ( hermon, &init_hca ) ) != 0 )
		goto err_alloc_icm;

	/* Initialise HCA */
	MLX_FILL_1 ( &init_hca, 0, version, 0x02 /* "Must be 0x02" */ );
	MLX_FILL_1 ( &init_hca, 5, udp, 1 );
	MLX_FILL_1 ( &init_hca, 74, uar_parameters.log_max_uars, 8 );
	if ( ( rc = hermon_cmd_init_hca ( hermon, &init_hca ) ) != 0 ) {
		DBGC ( hermon, "Hermon %p could not initialise HCA: %s\n",
		       hermon, strerror ( rc ) );
		goto err_init_hca;
	}

	/* Set up memory protection */
	if ( ( rc = hermon_setup_mpt ( hermon ) ) != 0 )
		goto err_setup_mpt;

	/* Register Infiniband devices */
	for ( i = 0 ; i < HERMON_NUM_PORTS ; i++ ) {
		if ( ( rc = register_ibdev ( hermon->ibdev[i] ) ) != 0 ) {
			DBGC ( hermon, "Hermon %p could not register IB "
			       "device: %s\n", hermon, strerror ( rc ) );
			goto err_register_ibdev;
		}
	}

	return 0;

	i = ( HERMON_NUM_PORTS - 1 );
 err_register_ibdev:
	for ( ; i >= 0 ; i-- )
		unregister_ibdev ( hermon->ibdev[i] );
 err_setup_mpt:
	hermon_cmd_close_hca ( hermon );
 err_init_hca:
	hermon_free_icm ( hermon );
 err_alloc_icm:
 err_get_cap:
	hermon_stop_firmware ( hermon );
 err_start_firmware:
	free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
 err_mailbox_out:
	free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
 err_mailbox_in:
	i = ( HERMON_NUM_PORTS - 1 );
 err_alloc_ibdev:
	for ( ; i >= 0 ; i-- )
		free_ibdev ( hermon->ibdev[i] );
	free ( hermon );
 err_alloc_hermon:
	return rc;
}
/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void hermon_remove ( struct pci_device *pci ) {
	struct hermon *hermon = pci_get_drvdata ( pci );
	int i;

	for ( i = ( HERMON_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
		unregister_ibdev ( hermon->ibdev[i] );
	hermon_cmd_close_hca ( hermon );
	hermon_free_icm ( hermon );
	hermon_stop_firmware ( hermon );
	free_dma ( hermon->mailbox_out, HERMON_MBOX_SIZE );
	free_dma ( hermon->mailbox_in, HERMON_MBOX_SIZE );
	for ( i = ( HERMON_NUM_PORTS - 1 ) ; i >= 0 ; i-- )
		free_ibdev ( hermon->ibdev[i] );
	free ( hermon );
}
static struct pci_device_id hermon_nics[] = {
	PCI_ROM ( 0x15b3, 0x6340, "mt25408", "MT25408 HCA driver" ),
	PCI_ROM ( 0x15b3, 0x634a, "mt25418", "MT25418 HCA driver" ),
};

struct pci_driver hermon_driver __pci_driver = {
	.ids = hermon_nics,
	.id_count = ( sizeof ( hermon_nics ) / sizeof ( hermon_nics[0] ) ),
	.probe = hermon_probe,
	.remove = hermon_remove,
};