[MLX4] added support for 26438 and 26488 devices. [mlnx: 4943]
hw/mlx4/user/hca/mlx4.c
/*
 * Copyright (c) 2007 Cisco, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx4.h"
#include "mx_abi.h"

#ifndef PCI_VENDOR_ID_MELLANOX
#define PCI_VENDOR_ID_MELLANOX          0x15b3
#endif

#define HCA(v, d) \
        { PCI_VENDOR_ID_##v, d }

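/*
 * PCI IDs of the ConnectX ("Hermon") devices this provider supports;
 * mlx4_fill_context() rejects any device whose vendor/device pair is
 * not listed here.
 */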
struct {
        unsigned                vendor;
        unsigned                device;
} hca_table[] = {
        HCA(MELLANOX, 0x6340),  /* MT25408 "Hermon" SDR */
        HCA(MELLANOX, 0x634a),  /* MT25418 "Hermon" DDR */
        HCA(MELLANOX, 0x6732),  /* MT26418 "Hermon" DDR PCIe gen2 */
        HCA(MELLANOX, 0x6778),  /* MT26488 "Hermon" DDR PCIe gen2 */
        HCA(MELLANOX, 0x673C),  /* MT26428 "Hermon" QDR PCIe gen2 */
        HCA(MELLANOX, 0x6746),  /* MT26438 "Hermon" QDR PCIe gen2 */

        HCA(MELLANOX, 0x6368),  /* MT25448 "Hermon" Ethernet */
        HCA(MELLANOX, 0x6372),  /* MT25458 "Hermon" Ethernet Yatir */
        HCA(MELLANOX, 0x6750),  /* MT26448 "Hermon" Ethernet PCIe gen2 */
        HCA(MELLANOX, 0x675A),  /* MT26458 "Hermon" Ethernet Yatir PCIe gen2 */
        HCA(MELLANOX, 0x6764),  /* MT26468 "Hermon" B0 Ethernet PCIe gen2 */
        HCA(MELLANOX, 0x676E),  /* MT26478 "Hermon" B0 40Gb Ethernet PCIe gen2 */

        HCA(MELLANOX, 0x0191),  /* MT25408 "Hermon" livefish mode */
};

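/*
 * Allocate the per-process context together with its mutexes and
 * spinlocks.  The kernel-supplied parameters are filled in later by
 * mlx4_fill_context().
 */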
struct ibv_context * mlx4_alloc_context()
{
        struct mlx4_context *context;

        /* allocate context */
        context = cl_zalloc(sizeof *context);
        if (!context)
                goto end;

        context->qp_table_mutex = CreateMutex(NULL, FALSE, NULL);
        if (!context->qp_table_mutex)
                goto err_qp_mutex;

#ifdef XRC_SUPPORT
        context->xrc_srq_table_mutex = CreateMutex(NULL, FALSE, NULL);
        if (!context->xrc_srq_table_mutex)
                goto err_xrc_mutex;
#endif

        context->db_list_mutex = CreateMutex(NULL, FALSE, NULL);
        if (!context->db_list_mutex)
                goto err_db_mutex;

        if (cl_spinlock_init(&context->uar_lock))
                goto err_uar_spinlock;

        if (cl_spinlock_init(&context->bf_lock))
                goto err_bf_spinlock;

        return &context->ibv_ctx;

err_bf_spinlock:
        cl_spinlock_destroy(&context->uar_lock);
err_uar_spinlock:
        CloseHandle(context->db_list_mutex);
err_db_mutex:
#ifdef XRC_SUPPORT
        CloseHandle(context->xrc_srq_table_mutex);
err_xrc_mutex:
#endif
        CloseHandle(context->qp_table_mutex);
err_qp_mutex:
        cl_free(context);
end:
        return NULL;
}

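/*
 * Finish initializing a context from the kernel's get-context response:
 * verify the device is one we support, size the QP (and, when enabled,
 * XRC SRQ) lookup tables, and record the mapped UAR/BlueFlame pages and
 * reported device limits.
 */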
struct ibv_context * mlx4_fill_context(struct ibv_context *ctx, struct ibv_get_context_resp *p_resp)
{
        struct mlx4_context *context = to_mctx(ctx);
        SYSTEM_INFO sys_info;
        int i;

        /* check device type */
        for (i = 0; i < sizeof hca_table / sizeof hca_table[0]; ++i)
                if (p_resp->vend_id == hca_table[i].vendor &&
                    p_resp->dev_id == hca_table[i].device)
                        goto found;
        goto err_dev_type;

found:
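        /*
         * num_qps is a power of two, so ffsl(num_qps) - 1 is log2(num_qps).
         * The QP lookup table is two-level: MLX4_QP_TABLE_SIZE top-level
         * buckets, each covering 2^qp_table_shift consecutive QP numbers,
         * with qp_table_mask selecting the index within a bucket.  For
         * example (assuming MLX4_QP_TABLE_BITS == 8 and 64K QPs), the
         * shift is 8 and the mask is 0xff.
         */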
        context->num_qps        = p_resp->qp_tab_size;
        context->qp_table_shift = ffsl(context->num_qps) - 1 - MLX4_QP_TABLE_BITS;
        context->qp_table_mask  = (1 << context->qp_table_shift) - 1;

        for (i = 0; i < MLX4_QP_TABLE_SIZE; ++i)
                context->qp_table[i].refcnt = 0;

#ifdef XRC_SUPPORT
        context->num_xrc_srqs   = p_resp->qp_tab_size;
        context->xrc_srq_table_shift = ffsl(context->num_xrc_srqs) - 1
                                       - MLX4_XRC_SRQ_TABLE_BITS;
        context->xrc_srq_table_mask = (1 << context->xrc_srq_table_shift) - 1;

        for (i = 0; i < MLX4_XRC_SRQ_TABLE_SIZE; ++i)
                context->xrc_srq_table[i].refcnt = 0;
#endif

        for (i = 0; i < MLX4_NUM_DB_TYPE; ++i)
                context->db_list[i] = NULL;

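        /*
         * uar_addr and bf_page are the user-space mappings (set up by the
         * kernel driver) of the UAR doorbell page and, if available, the
         * BlueFlame page used for low-latency posting of small sends.
         */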
        context->uar            = (uint8_t *)(uintptr_t)p_resp->uar_addr;
        context->bf_page        = (uint8_t *)(uintptr_t)p_resp->bf_page;
        context->bf_buf_size    = p_resp->bf_buf_size;
        context->bf_offset      = p_resp->bf_offset;

        context->max_qp_wr      = p_resp->max_qp_wr;
        context->max_sge        = p_resp->max_sge;
        context->max_cqe        = p_resp->max_cqe;

        GetSystemInfo(&sys_info);
        context->ibv_ctx.page_size = sys_info.dwPageSize;

        return &context->ibv_ctx;

err_dev_type:
        mlx4_free_context(&context->ibv_ctx);
        return NULL;
}

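/*
 * Release the synchronization objects and memory allocated by
 * mlx4_alloc_context().
 */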
void mlx4_free_context(struct ibv_context *ctx)
{
        struct mlx4_context *context = to_mctx(ctx);

        cl_spinlock_destroy(&context->bf_lock);
        cl_spinlock_destroy(&context->uar_lock);
        CloseHandle(context->db_list_mutex);
#ifdef XRC_SUPPORT
        CloseHandle(context->xrc_srq_table_mutex);
#endif
        CloseHandle(context->qp_table_mutex);
        cl_free(context);
}

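/*
 * Build the user-mode verbs provider (UVP) dispatch table.  A NULL
 * entry presumably means this library has no user-mode work to do for
 * that verb, so the call is handled entirely by the kernel driver.
 */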
static void __get_uvp_interface(uvp_interface_t *p_uvp)
{
        p_uvp->pre_open_ca      = mlx4_pre_open_ca;
        p_uvp->post_open_ca     = mlx4_post_open_ca;
        p_uvp->pre_query_ca     = NULL;
        p_uvp->post_query_ca    = NULL;
        p_uvp->pre_modify_ca    = NULL;
        p_uvp->post_modify_ca   = NULL;
        p_uvp->pre_close_ca     = NULL;
        p_uvp->post_close_ca    = mlx4_post_close_ca;

        /*
         * Protection Domain
         */
        p_uvp->pre_allocate_pd          = mlx4_pre_alloc_pd;
        p_uvp->post_allocate_pd         = mlx4_post_alloc_pd;
        p_uvp->pre_deallocate_pd        = NULL;
        p_uvp->post_deallocate_pd       = mlx4_post_free_pd;

        /*
         * SRQ Management Verbs
         */
        p_uvp->pre_create_srq   = mlx4_pre_create_srq;
        p_uvp->post_create_srq  = mlx4_post_create_srq;
        p_uvp->pre_query_srq    = NULL;
        p_uvp->post_query_srq   = NULL;
        p_uvp->pre_modify_srq   = NULL;
        p_uvp->post_modify_srq  = NULL;
        p_uvp->pre_destroy_srq  = NULL;
        p_uvp->post_destroy_srq = mlx4_post_destroy_srq;

        p_uvp->pre_create_qp    = mlx4_pre_create_qp;
        p_uvp->wv_pre_create_qp = mlx4_wv_pre_create_qp;
        p_uvp->post_create_qp   = mlx4_post_create_qp;
        p_uvp->pre_modify_qp    = mlx4_pre_modify_qp;
        p_uvp->post_modify_qp   = mlx4_post_modify_qp;
        p_uvp->pre_query_qp     = NULL;
        p_uvp->post_query_qp    = mlx4_post_query_qp;
        p_uvp->pre_destroy_qp   = mlx4_pre_destroy_qp;
        p_uvp->post_destroy_qp  = mlx4_post_destroy_qp;
        p_uvp->nd_modify_qp     = mlx4_nd_modify_qp;
        p_uvp->nd_get_qp_state  = mlx4_nd_get_qp_state;

        /*
         * Completion Queue Management Verbs
         */
        p_uvp->pre_create_cq    = mlx4_pre_create_cq;
        p_uvp->post_create_cq   = mlx4_post_create_cq;
        p_uvp->pre_query_cq     = mlx4_pre_query_cq;
        p_uvp->post_query_cq    = NULL;
        p_uvp->pre_resize_cq    = NULL;
        p_uvp->post_resize_cq   = NULL;
        p_uvp->pre_destroy_cq   = NULL;
        p_uvp->post_destroy_cq  = mlx4_post_destroy_cq;

        /*
         * AV Management
         */
        p_uvp->pre_create_av    = mlx4_pre_create_ah;
        p_uvp->post_create_av   = NULL;
        p_uvp->pre_query_av     = mlx4_pre_query_ah;
        p_uvp->post_query_av    = mlx4_post_query_ah;
        p_uvp->pre_modify_av    = mlx4_pre_modify_ah;
        p_uvp->post_modify_av   = NULL;
        p_uvp->pre_destroy_av   = mlx4_pre_destroy_ah;
        p_uvp->post_destroy_av  = NULL;

        /*
         * Memory Region / Window Management Verbs
         */
        p_uvp->pre_create_mw    = NULL;
        p_uvp->post_create_mw   = NULL;
        p_uvp->pre_query_mw     = NULL;
        p_uvp->post_query_mw    = NULL;
        p_uvp->pre_destroy_mw   = NULL;
        p_uvp->post_destroy_mw  = NULL;

        /*
         * Multicast Support Verbs
         */
        p_uvp->pre_attach_mcast         = NULL;
        p_uvp->post_attach_mcast        = NULL;
        p_uvp->pre_detach_mcast         = NULL;
        p_uvp->post_detach_mcast        = NULL;

        /*
         * OS bypass (send, receive, poll/notify cq)
         */
        p_uvp->post_send        = mlx4_post_send;
        p_uvp->post_recv        = mlx4_post_recv;
        p_uvp->post_srq_recv    = mlx4_post_srq_recv;
        p_uvp->poll_cq          = mlx4_poll_cq_list;
        p_uvp->poll_cq_array    = mlx4_poll_cq_array;
        p_uvp->rearm_cq         = mlx4_arm_cq;
        p_uvp->rearm_n_cq       = NULL;
        p_uvp->peek_cq          = NULL;
        p_uvp->bind_mw          = NULL;
}

/* TODO: define and expose XRC through new interface GUID */
#ifdef XRC_SUPPORT
static void __get_xrc_interface(uvp_xrc_interface_t *p_xrc)
{
        /*
         * XRC Management Verbs
         */
        p_xrc->pre_create_xrc_srq       = mlx4_pre_create_xrc_srq;
        p_xrc->post_create_xrc_srq      = mlx4_post_create_xrc_srq;
        p_xrc->pre_open_xrc_domain      = mlx4_pre_open_xrc_domain;
        p_xrc->post_open_xrc_domain     = mlx4_post_open_xrc_domain;
        p_xrc->pre_close_xrc_domain     = NULL;
        p_xrc->post_close_xrc_domain    = mlx4_post_close_xrc_domain;
        p_xrc->pre_create_xrc_rcv_qp    = NULL;
        p_xrc->post_create_xrc_rcv_qp   = NULL;
        p_xrc->pre_modify_xrc_rcv_qp    = NULL;
        p_xrc->post_modify_xrc_rcv_qp   = NULL;
        p_xrc->pre_query_xrc_rcv_qp     = NULL;
        p_xrc->post_query_xrc_rcv_qp    = NULL;
        p_xrc->pre_reg_xrc_rcv_qp       = NULL;
        p_xrc->post_reg_xrc_rcv_qp      = NULL;
        p_xrc->pre_unreg_xrc_rcv_qp     = NULL;
        p_xrc->post_unreg_xrc_rcv_qp    = NULL;
}
#endif

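/*
 * Exported entry point queried by the loader: callers pass an interface
 * GUID, and only IID_UVP is recognized here; any other GUID returns
 * IB_UNSUPPORTED.
 */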
__declspec(dllexport) ib_api_status_t
uvp_get_interface (GUID iid, void* pifc)
{
        ib_api_status_t status = IB_SUCCESS;

        if (IsEqualGUID(&iid, &IID_UVP))
        {
                __get_uvp_interface((uvp_interface_t *) pifc);
        }
        else
        {
                status = IB_UNSUPPORTED;
        }

        return status;
}