/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>

#include "mlx4_ib.h"

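/*
 * Vendor-defined management classes serviced through MAD_IFC; 0x9 and
 * 0xa lie in the range the IBA spec reserves for vendor-specific
 * classes (0x09-0x0f).
 */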
enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
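	/* op_modifier bit 0 skips the MKey check; bit 1 skips the BKey check. */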
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	if (in_wc) {
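		/*
		 * Layout of the extended info block that the MAD_IFC
		 * command reads from the second 256 bytes of the input
		 * mailbox when op_modifier bit 2 is set (filled in below).
		 */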
		struct {
			__be32	my_qpn;
			u32	reserved1;
			__be32	rqpn;
			u8	sl;
			u8	g_path;
			u16	reserved2[2];
			__be16	pkey;
			u32	reserved3[11];
			u8	grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
		ext_info->sl = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= in_wc->slid << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
			   in_modifier, op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

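/*
 * Build and cache an address handle for the master SM, so that traps
 * can later be forwarded to it (see forward_trap()).
 */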
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = lid;
	ah_attr.sl = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we
 * can synthesize LID change, Client-Rereg, GID change, and P_Key
 * change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_CLIENT_REREGISTER);

			if (prev_lid != lid)
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_LID_CHANGE);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			mlx4_ib_dispatch_event(dev, port_num,
					       IB_EVENT_PKEY_CHANGE);
			break;

		case IB_SMP_ATTR_GUID_INFO:
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			break;
		default:
			break;
		}
}

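/*
 * Replace the NodeDescription that firmware returned with the string
 * the core stack keeps in dev->node_desc; sm_lock is held since the
 * description can be changed (e.g. through sysfs) concurrently.
 */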
static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

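/*
 * Forward a trap raised by the local SMA (these arrive with SLID 0 --
 * see ib_process_mad()) to the subnet manager, using the address
 * handle cached by update_sm_ah().
 */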
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  struct ib_wc *in_wc, struct ib_grh *in_grh,
			  struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	if (in_wc && in_wc->qp->qp_num) {
		pr_debug("received MAD: slid:%d sqpn:%d "
			 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
			 in_wc->slid, in_wc->src_qp,
			 in_wc->dlid_path_bits,
			 in_wc->qp->qp_num,
			 in_wc->wc_flags,
			 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
			 be16_to_cpu(in_mad->mad_hdr.attr_id));
		if (in_wc->wc_flags & IB_WC_GRH) {
			pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->sgid.global.interface_id));
			pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
				 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
				 be64_to_cpu(in_grh->dgid.global.interface_id));
		}
	}

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

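	/*
	 * Traps generated by the local SMA arrive with SLID 0; forward
	 * them to the SM rather than processing them here.
	 */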
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = pattr.lid;

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
			smp_snoop(ibdev, port_num, in_mad, prev_lid);
		node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

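/*
 * Translate the mlx4 hardware flow counters into an IB PMA
 * PortCounters reply.  Per the IBA spec the PMA data counters count
 * units of four octets (dwords), hence the byte counts are shifted
 * right by two; the PortCounters attribute itself begins 40 reserved
 * bytes into the MAD data area, which is why the caller passes
 * out_mad->data + 40.
 */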
static void edit_counter(struct mlx4_counter *cnt,
			 struct ib_pma_portcounters *pma_cnt)
{
	pma_cnt->port_xmit_data = cpu_to_be32(be64_to_cpu(cnt->tx_bytes) >> 2);
	pma_cnt->port_rcv_data = cpu_to_be32(be64_to_cpu(cnt->rx_bytes) >> 2);
	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
	pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			    struct ib_wc *in_wc, struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err;
	u32 inmod = dev->counters[port_num - 1] & 0xffff;
	u8 mode;

	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return IB_MAD_RESULT_FAILURE;

	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		err = IB_MAD_RESULT_FAILURE;
	else {
		memset(out_mad->data, 0, sizeof out_mad->data);
		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
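		/* Only the basic counter mode (0) is translated here. */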
		switch (mode & 0xf) {
		case 0:
			edit_counter(mailbox->buf,
				     (void *)(out_mad->data + 40));
			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
			break;
		default:
			err = IB_MAD_RESULT_FAILURE;
		}
	}

	mlx4_free_cmd_mailbox(dev->dev, mailbox);

	return err;
}

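/*
 * Demultiplex MADs by link layer: IB ports go through the firmware
 * MAD_IFC path, while Ethernet (RoCE) ports only service PMA counter
 * queries, answered from the hardware flow counters.
 */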
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (rdma_port_get_link_layer(ibdev, port_num)) {
	case IB_LINK_LAYER_INFINIBAND:
		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
				      in_grh, in_mad, out_mad);
	case IB_LINK_LAYER_ETHERNET:
		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
					in_grh, in_mad, out_mad);
	default:
		return -EINVAL;
	}
}

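/*
 * The agents registered below are send-only (no receive handler):
 * they exist so traps can be forwarded to the SM, and once a send
 * completes all that is left to do is free the buffer.
 */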
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;
	int ret;
	enum rdma_link_layer ll;

	for (p = 0; p < dev->num_ports; ++p) {
		ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
		for (q = 0; q <= 1; ++q) {
			if (ll == IB_LINK_LAYER_INFINIBAND) {
				agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
							      q ? IB_QPT_GSI : IB_QPT_SMI,
							      NULL, 0, send_handler,
							      NULL, NULL);
				if (IS_ERR(agent)) {
					ret = PTR_ERR(agent);
					goto err;
				}
				dev->send_agent[p][q] = agent;
			} else
				dev->send_agent[p][q] = NULL;
		}
	}

	return 0;

err:
	for (p = 0; p < dev->num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	for (p = 0; p < dev->num_ports; ++p) {
		for (q = 0; q <= 1; ++q) {
			agent = dev->send_agent[p][q];
			if (agent) {
				dev->send_agent[p][q] = NULL;
				ib_unregister_mad_agent(agent);
			}
		}

		if (dev->sm_ah[p])
			ib_destroy_ah(dev->sm_ah[p]);
	}
}
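/*
 * Devices that support port management change events report SM-driven
 * changes through the event queue instead of requiring MAD snooping;
 * see the MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV check in ib_process_mad().
 */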
void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/*
		 * Update the SM ah first, before handling the other
		 * changed attributes, so that MADs can be sent to the SM.
		 */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			mlx4_ib_dispatch_event(dev, port,
					       IB_EVENT_CLIENT_REREGISTER);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for "
			"Port Management Change event\n", eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device = &dev->ib_dev;
	event.element.port_num = port_num;
	event.event = type;

	ib_dispatch_event(&event);
}