/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <ib_verbs.h>
#include <ib_mad.h>
#include <ib_smi.h>

#include "mthca_dev.h"
#if defined(EVENT_TRACING)
#include "mthca_mad.tmh"
#endif
#include "mthca_cmd.h"
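/* Per-send context for a forwarded trap: holds the DMA-able copy of the MAD. */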
struct mthca_trap_mad {
        struct scatterlist sg;
};
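/*
 * Cache an address handle for sends to the subnet manager. Called by
 * smp_snoop() whenever a PortInfo set reports a new SM LID or SL.
 */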
static void update_sm_ah(struct mthca_dev *dev,
                         u8 port_num, u16 lid, u8 sl)
{
        struct ib_ah *new_ah;
        struct ib_ah_attr ah_attr;
        SPIN_LOCK_PREP(lh);

        if (!dev->send_agent[port_num - 1][0])
                return;

        RtlZeroMemory(&ah_attr, sizeof ah_attr);
        ah_attr.dlid     = lid;
        ah_attr.sl       = sl;
        ah_attr.port_num = port_num;

        new_ah = ibv_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
                               &ah_attr, NULL, NULL);
        if (IS_ERR(new_ah))
                return;

        /* publish the new SM AH under the lock, freeing any stale one */
        spin_lock_irqsave(&dev->sm_lock, &lh);
        if (dev->sm_ah[port_num - 1])
                ibv_destroy_ah(dev->sm_ah[port_num - 1]);
        dev->sm_ah[port_num - 1] = new_ah;
        spin_unlock_irqrestore(&lh);
}
/*
 * Snoop SM MADs for port info and P_Key table sets, so we can
 * synthesize LID change and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev,
                      u8 port_num,
                      struct ib_mad *mad)
{
        struct ib_event event;

        if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
             mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
            mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
                        /* byte 58 of the PortInfo data holds the master SM
                         * LID; the low nibble of byte 76 the SM SL */
                        update_sm_ah(to_mdev(ibdev), port_num,
                                     cl_ntoh16(*(__be16 *) (mad->data + 58)),
                                     (*(u8 *) (mad->data + 76)) & 0xf);

                        event.device           = ibdev;
                        event.event            = IB_EVENT_LID_CHANGE;
                        event.element.port_num = port_num;
                        ib_dispatch_event(&event);
                }

                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
                        event.device           = ibdev;
                        event.event            = IB_EVENT_PKEY_CHANGE;
                        event.element.port_num = port_num;
                        ib_dispatch_event(&event);
                }
        }
}
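/*
 * Re-inject a locally generated trap toward the subnet manager,
 * using the SM address handle cached by update_sm_ah().
 */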
static void forward_trap(struct mthca_dev *dev,
                         u8 port_num,
                         struct ib_mad *mad)
{
        /* SM-class traps go to QP0, everything else to QP1 */
        int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
        struct mthca_trap_mad *tmad;
        struct ib_sge gather_list;
        struct _ib_send_wr wr;
        struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
        SPIN_LOCK_PREP(lh);

        if (!agent)
                return;

        /* fill the template */
        wr.ds_array = (ib_local_ds_t*)(void*)&gather_list;
        wr.num_ds = 1;
        wr.wr_type = WR_SEND;
        wr.send_opt = IB_SEND_OPT_SIGNALED;
        wr.dgrm.ud.remote_qp = cl_hton32(qpn);
        wr.dgrm.ud.remote_qkey = qpn ? IB_QP1_QKEY : 0;

        tmad = kmalloc(sizeof *tmad, GFP_KERNEL);
        if (!tmad)
                return;

        alloc_dma_zmem(dev, sizeof *mad, &tmad->sg);
        if (!tmad->sg.page) {
                kfree(tmad);
                return;
        }

        memcpy(tmad->sg.page, mad, sizeof *mad);

        wr.dgrm.ud.rsvd = (void*)&((struct ib_mad *)tmad->sg.page)->mad_hdr;
        wr.wr_id = (u64)(ULONG_PTR)tmad;
        gather_list.addr = tmad->sg.dma_address;
        gather_list.length = tmad->sg.length;
        gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;

        /*
         * We rely here on the fact that MLX QPs don't use the
         * address handle after the send is posted (this is
         * wrong following the IB spec strictly, but we know
         * it's OK for our devices).
         */
        spin_lock_irqsave(&dev->sm_lock, &lh);
        wr.dgrm.ud.h_av = (ib_av_handle_t)dev->sm_ah[port_num - 1];
        if (wr.dgrm.ud.h_av) {
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MAD, ("ib_post_send_mad not ported\n"));
        }
        spin_unlock_irqrestore(&lh);

        /* posting MADs is not ported on this stack, so always release the copy */
        free_dma_mem_map(dev, &tmad->sg, PCI_DMA_BIDIRECTIONAL);
        kfree(tmad);
}
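/*
 * Main MAD dispatch entry: filter MADs by management class and method,
 * pass the survivors to firmware through the MAD_IFC command, and snoop
 * successful responses to synthesize port events.
 */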
int mthca_process_mad(struct ib_device *ibdev,
                      int mad_flags,
                      u8 port_num,
                      struct _ib_wc *in_wc,
                      struct _ib_grh *in_grh,
                      struct ib_mad *in_mad,
                      struct ib_mad *out_mad)
{
        int err;
        u8 status;
        u16 slid = in_wc ? in_wc->recv.ud.remote_lid : cl_ntoh16(IB_LID_PERMISSIVE);
        HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_MAD, ("in: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x\n",
                (u32)in_mad->mad_hdr.mgmt_class, (u32)in_mad->mad_hdr.method,
                (u32)in_mad->mad_hdr.attr_id, in_mad->mad_hdr.attr_mod,
                (u32)in_mad->mad_hdr.class_specific, in_mad->mad_hdr.tid));
        /* Forward locally generated traps to the SM */
        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
                forward_trap(to_mdev(ibdev), port_num, in_mad);
                HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_MAD, ("Not sent, but locally forwarded\n"));
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
        }
        /*
         * Only handle SM gets, sets, and trap represses for the SM class.
         *
         * Only handle PMA and Mellanox vendor-specific class gets and
         * sets for other classes.
         */
        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {

                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) {
                        HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_MAD, ("Unsupported SM method. Nothing done!\n"));
                        return IB_MAD_RESULT_SUCCESS;
                }

                /*
                 * Don't process SMInfo queries or vendor-specific
                 * MADs -- the SMA can't handle them.
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
                    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
                     IB_SMP_ATTR_VENDOR_MASK)) {
                        HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_MAD, ("Skipping SMInfo query or vendor-specific MAD. Nothing done!\n"));
                        return IB_MAD_RESULT_SUCCESS;
                }
        }
        else {
                if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                    in_mad->mad_hdr.mgmt_class == IB_MLX_VENDOR_CLASS1 ||
                    in_mad->mad_hdr.mgmt_class == IB_MLX_VENDOR_CLASS2) {

                        if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
                            in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) {
                                HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_MAD, ("Unsupported PMA/vendor method. Nothing done!\n"));
                                return IB_MAD_RESULT_SUCCESS;
                        }
                }
                else {
                        HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_MAD, ("Unsupported management class. Nothing done!\n"));
                        return IB_MAD_RESULT_SUCCESS;
                }
        }
        err = mthca_MAD_IFC(to_mdev(ibdev),
                            mad_flags & IB_MAD_IGNORE_MKEY,
                            mad_flags & IB_MAD_IGNORE_BKEY,
                            port_num, in_wc, in_grh, in_mad, out_mad,
                            &status);
        if (err) {
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MAD, ("MAD_IFC failed\n"));
                return IB_MAD_RESULT_FAILURE;
        }
        /* silently drop MADs that firmware flags as bad packets */
        if (status == MTHCA_CMD_STAT_BAD_PKT)
                return IB_MAD_RESULT_SUCCESS;
        if (status) {
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MAD, ("MAD_IFC returned status %02x\n", status));
                return IB_MAD_RESULT_FAILURE;
        }
        if (!out_mad->mad_hdr.status)
                smp_snoop(ibdev, port_num, in_mad);
        HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_MAD, ("out: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x, Status %x\n",
                (u32)out_mad->mad_hdr.mgmt_class, (u32)out_mad->mad_hdr.method,
                (u32)out_mad->mad_hdr.attr_id, out_mad->mad_hdr.attr_mod,
                (u32)out_mad->mad_hdr.class_specific, out_mad->mad_hdr.tid,
                (u32)out_mad->mad_hdr.status));
        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) {
                /* no response for trap repress */
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
        }

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
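/*
 * Send completion handler for forwarded traps: releases the DMA buffer
 * allocated in forward_trap().
 */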
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        struct mthca_trap_mad *tmad =
                (void *) (ULONG_PTR) mad_send_wc->wr_id;

        free_dma_mem_map(agent->device->mdev, &tmad->sg, PCI_DMA_BIDIRECTIONAL);
        kfree(tmad);
}