Commit | Line | Data |
---|---|---|
f931551b | 1 | /* |
1fb9fed6 MM |
2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | |
f931551b RC |
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | */ | |
34 | ||
35 | #include <rdma/ib_smi.h> | |
36 | ||
37 | #include "qib.h" | |
38 | #include "qib_mad.h" | |
39 | ||
40 | static int reply(struct ib_smp *smp) | |
41 | { | |
42 | /* | |
43 | * The verbs framework will handle the directed/LID route | |
44 | * packet changes. | |
45 | */ | |
46 | smp->method = IB_MGMT_METHOD_GET_RESP; | |
47 | if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |
48 | smp->status |= IB_SMP_DIRECTION; | |
49 | return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; | |
50 | } | |
51 | ||
36a8f01c MM |
52 | static int reply_failure(struct ib_smp *smp) |
53 | { | |
54 | /* | |
55 | * The verbs framework will handle the directed/LID route | |
56 | * packet changes. | |
57 | */ | |
58 | smp->method = IB_MGMT_METHOD_GET_RESP; | |
59 | if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | |
60 | smp->status |= IB_SMP_DIRECTION; | |
61 | return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY; | |
62 | } | |
63 | ||
/*
 * Queue a trap MAD (SMP Notice attribute) to the subnet manager for
 * this port.  The trap is silently dropped when no send agent is
 * registered, the link is not ACTIVE (o14-3.2.1), or a previously
 * sent trap is still within its repress window (o14-2).
 *
 * @ibp: the port the trap originates from
 * @data: the notice payload, copied into smp->data
 * @len: payload length in bytes (must fit in smp->data)
 */
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->rvp.send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2 */
	if (ibp->rvp.trap_timeout &&
	    time_before(jiffies, ibp->rvp.trap_timeout))
		return;

	/* GFP_ATOMIC: may be called from interrupt/atomic context. */
	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
				      IB_MGMT_BASE_VERSION);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->rvp.tid++;
	smp->tid = cpu_to_be64(ibp->rvp.tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	/*
	 * Resolve an address handle to the SM under the port lock,
	 * caching it in ibp->sm_ah for subsequent traps.
	 */
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	if (!ibp->sm_ah) {
		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;

			ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid);
			if (IS_ERR(ah))
				ret = PTR_ERR(ah);
			else {
				send_buf->ah = ah;
				ibp->sm_ah = ibah_to_rvtah(ah);
				ret = 0;
			}
		} else
			/* No SM LID known yet; cannot address the trap. */
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000;
		ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		/* Send failed: release the buffer and re-arm immediately. */
		ib_free_send_mad(send_buf);
		ibp->rvp.trap_timeout = 0;
	}
}
135 | ||
136 | /* | |
137 | * Send a bad [PQ]_Key trap (ch. 14.3.8). | |
138 | */ | |
139 | void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, | |
140 | u32 qp1, u32 qp2, __be16 lid1, __be16 lid2) | |
141 | { | |
142 | struct ib_mad_notice_attr data; | |
143 | ||
144 | if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) | |
f24a6d48 | 145 | ibp->rvp.pkey_violations++; |
f931551b | 146 | else |
f24a6d48 HC |
147 | ibp->rvp.qkey_violations++; |
148 | ibp->rvp.n_pkt_drops++; | |
f931551b RC |
149 | |
150 | /* Send violation trap */ | |
151 | data.generic_type = IB_NOTICE_TYPE_SECURITY; | |
152 | data.prod_type_msb = 0; | |
153 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | |
154 | data.trap_num = trap_num; | |
155 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | |
156 | data.toggle_count = 0; | |
041af0bb | 157 | memset(&data.details, 0, sizeof(data.details)); |
f931551b RC |
158 | data.details.ntc_257_258.lid1 = lid1; |
159 | data.details.ntc_257_258.lid2 = lid2; | |
160 | data.details.ntc_257_258.key = cpu_to_be32(key); | |
161 | data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); | |
162 | data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); | |
163 | ||
041af0bb | 164 | qib_send_trap(ibp, &data, sizeof(data)); |
f931551b RC |
165 | } |
166 | ||
167 | /* | |
168 | * Send a bad M_Key trap (ch. 14.3.9). | |
169 | */ | |
170 | static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) | |
171 | { | |
172 | struct ib_mad_notice_attr data; | |
173 | ||
174 | /* Send violation trap */ | |
175 | data.generic_type = IB_NOTICE_TYPE_SECURITY; | |
176 | data.prod_type_msb = 0; | |
177 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | |
178 | data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; | |
179 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | |
180 | data.toggle_count = 0; | |
041af0bb | 181 | memset(&data.details, 0, sizeof(data.details)); |
f931551b RC |
182 | data.details.ntc_256.lid = data.issuer_lid; |
183 | data.details.ntc_256.method = smp->method; | |
184 | data.details.ntc_256.attr_id = smp->attr_id; | |
185 | data.details.ntc_256.attr_mod = smp->attr_mod; | |
186 | data.details.ntc_256.mkey = smp->mkey; | |
187 | if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | |
188 | u8 hop_cnt; | |
189 | ||
190 | data.details.ntc_256.dr_slid = smp->dr_slid; | |
191 | data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE; | |
192 | hop_cnt = smp->hop_cnt; | |
193 | if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) { | |
194 | data.details.ntc_256.dr_trunc_hop |= | |
195 | IB_NOTICE_TRAP_DR_TRUNC; | |
196 | hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path); | |
197 | } | |
198 | data.details.ntc_256.dr_trunc_hop |= hop_cnt; | |
199 | memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path, | |
200 | hop_cnt); | |
201 | } | |
202 | ||
041af0bb | 203 | qib_send_trap(ibp, &data, sizeof(data)); |
f931551b RC |
204 | } |
205 | ||
206 | /* | |
207 | * Send a Port Capability Mask Changed trap (ch. 14.3.11). | |
208 | */ | |
209 | void qib_cap_mask_chg(struct qib_ibport *ibp) | |
210 | { | |
211 | struct ib_mad_notice_attr data; | |
212 | ||
213 | data.generic_type = IB_NOTICE_TYPE_INFO; | |
214 | data.prod_type_msb = 0; | |
215 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | |
216 | data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; | |
217 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | |
218 | data.toggle_count = 0; | |
041af0bb | 219 | memset(&data.details, 0, sizeof(data.details)); |
f931551b | 220 | data.details.ntc_144.lid = data.issuer_lid; |
f24a6d48 HC |
221 | data.details.ntc_144.new_cap_mask = |
222 | cpu_to_be32(ibp->rvp.port_cap_flags); | |
041af0bb | 223 | qib_send_trap(ibp, &data, sizeof(data)); |
f931551b RC |
224 | } |
225 | ||
226 | /* | |
227 | * Send a System Image GUID Changed trap (ch. 14.3.12). | |
228 | */ | |
229 | void qib_sys_guid_chg(struct qib_ibport *ibp) | |
230 | { | |
231 | struct ib_mad_notice_attr data; | |
232 | ||
233 | data.generic_type = IB_NOTICE_TYPE_INFO; | |
234 | data.prod_type_msb = 0; | |
235 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | |
236 | data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; | |
237 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | |
238 | data.toggle_count = 0; | |
041af0bb | 239 | memset(&data.details, 0, sizeof(data.details)); |
f931551b RC |
240 | data.details.ntc_145.lid = data.issuer_lid; |
241 | data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; | |
242 | ||
041af0bb | 243 | qib_send_trap(ibp, &data, sizeof(data)); |
f931551b RC |
244 | } |
245 | ||
246 | /* | |
247 | * Send a Node Description Changed trap (ch. 14.3.13). | |
248 | */ | |
249 | void qib_node_desc_chg(struct qib_ibport *ibp) | |
250 | { | |
251 | struct ib_mad_notice_attr data; | |
252 | ||
253 | data.generic_type = IB_NOTICE_TYPE_INFO; | |
254 | data.prod_type_msb = 0; | |
255 | data.prod_type_lsb = IB_NOTICE_PROD_CA; | |
256 | data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; | |
257 | data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); | |
258 | data.toggle_count = 0; | |
041af0bb | 259 | memset(&data.details, 0, sizeof(data.details)); |
f931551b RC |
260 | data.details.ntc_144.lid = data.issuer_lid; |
261 | data.details.ntc_144.local_changes = 1; | |
262 | data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; | |
263 | ||
041af0bb | 264 | qib_send_trap(ibp, &data, sizeof(data)); |
f931551b RC |
265 | } |
266 | ||
267 | static int subn_get_nodedescription(struct ib_smp *smp, | |
268 | struct ib_device *ibdev) | |
269 | { | |
270 | if (smp->attr_mod) | |
271 | smp->status |= IB_SMP_INVALID_FIELD; | |
272 | ||
273 | memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); | |
274 | ||
275 | return reply(smp); | |
276 | } | |
277 | ||
278 | static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, | |
279 | u8 port) | |
280 | { | |
281 | struct ib_node_info *nip = (struct ib_node_info *)&smp->data; | |
282 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | |
283 | u32 vendor, majrev, minrev; | |
284 | unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ | |
285 | ||
286 | /* GUID 0 is illegal */ | |
287 | if (smp->attr_mod || pidx >= dd->num_pports || | |
288 | dd->pport[pidx].guid == 0) | |
289 | smp->status |= IB_SMP_INVALID_FIELD; | |
290 | else | |
291 | nip->port_guid = dd->pport[pidx].guid; | |
292 | ||
293 | nip->base_version = 1; | |
294 | nip->class_version = 1; | |
295 | nip->node_type = 1; /* channel adapter */ | |
296 | nip->num_ports = ibdev->phys_port_cnt; | |
297 | /* This is already in network order */ | |
298 | nip->sys_guid = ib_qib_sys_image_guid; | |
299 | nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */ | |
300 | nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd)); | |
301 | nip->device_id = cpu_to_be16(dd->deviceid); | |
302 | majrev = dd->majrev; | |
303 | minrev = dd->minrev; | |
304 | nip->revision = cpu_to_be32((majrev << 16) | minrev); | |
305 | nip->local_port_num = port; | |
306 | vendor = dd->vendorid; | |
307 | nip->vendor_id[0] = QIB_SRC_OUI_1; | |
308 | nip->vendor_id[1] = QIB_SRC_OUI_2; | |
309 | nip->vendor_id[2] = QIB_SRC_OUI_3; | |
310 | ||
311 | return reply(smp); | |
312 | } | |
313 | ||
314 | static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, | |
315 | u8 port) | |
316 | { | |
317 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | |
318 | u32 startgx = 8 * be32_to_cpu(smp->attr_mod); | |
319 | __be64 *p = (__be64 *) smp->data; | |
320 | unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ | |
321 | ||
322 | /* 32 blocks of 8 64-bit GUIDs per block */ | |
323 | ||
324 | memset(smp->data, 0, sizeof(smp->data)); | |
325 | ||
326 | if (startgx == 0 && pidx < dd->num_pports) { | |
327 | struct qib_pportdata *ppd = dd->pport + pidx; | |
328 | struct qib_ibport *ibp = &ppd->ibport_data; | |
329 | __be64 g = ppd->guid; | |
330 | unsigned i; | |
331 | ||
332 | /* GUID 0 is illegal */ | |
333 | if (g == 0) | |
334 | smp->status |= IB_SMP_INVALID_FIELD; | |
335 | else { | |
336 | /* The first is a copy of the read-only HW GUID. */ | |
337 | p[0] = g; | |
338 | for (i = 1; i < QIB_GUIDS_PER_PORT; i++) | |
339 | p[i] = ibp->guids[i - 1]; | |
340 | } | |
341 | } else | |
342 | smp->status |= IB_SMP_INVALID_FIELD; | |
343 | ||
344 | return reply(smp); | |
345 | } | |
346 | ||
347 | static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w) | |
348 | { | |
349 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w); | |
350 | } | |
351 | ||
352 | static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s) | |
353 | { | |
354 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s); | |
355 | } | |
356 | ||
357 | static int get_overrunthreshold(struct qib_pportdata *ppd) | |
358 | { | |
359 | return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH); | |
360 | } | |
361 | ||
362 | /** | |
363 | * set_overrunthreshold - set the overrun threshold | |
364 | * @ppd: the physical port data | |
365 | * @n: the new threshold | |
366 | * | |
367 | * Note that this will only take effect when the link state changes. | |
368 | */ | |
369 | static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n) | |
370 | { | |
371 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH, | |
372 | (u32)n); | |
373 | return 0; | |
374 | } | |
375 | ||
376 | static int get_phyerrthreshold(struct qib_pportdata *ppd) | |
377 | { | |
378 | return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH); | |
379 | } | |
380 | ||
381 | /** | |
382 | * set_phyerrthreshold - set the physical error threshold | |
383 | * @ppd: the physical port data | |
384 | * @n: the new threshold | |
385 | * | |
386 | * Note that this will only take effect when the link state changes. | |
387 | */ | |
388 | static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n) | |
389 | { | |
390 | (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH, | |
391 | (u32)n); | |
392 | return 0; | |
393 | } | |
394 | ||
395 | /** | |
396 | * get_linkdowndefaultstate - get the default linkdown state | |
397 | * @ppd: the physical port data | |
398 | * | |
399 | * Returns zero if the default is POLL, 1 if the default is SLEEP. | |
400 | */ | |
401 | static int get_linkdowndefaultstate(struct qib_pportdata *ppd) | |
402 | { | |
403 | return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) == | |
404 | IB_LINKINITCMD_SLEEP; | |
405 | } | |
406 | ||
/*
 * Validate the M_Key of an incoming SMP against the port's M_Key,
 * maintaining the lease timeout and violation counters as a side
 * effect (IBA ch. 14.2.4).
 *
 * Returns 0 when the MAD may be processed, 1 when it must be dropped
 * (for Set/TrapRepress — and Get at protection level >= 2 — a
 * bad-M_Key trap has then been queued).
 */
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int valid_mkey = 0;
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->rvp.mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->rvp.mkey_lease_timeout = 0;
		ibp->rvp.mkeyprot = 0;
	}

	/* A zero port M_Key, a matching key, or an explicit ignore passes. */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
	    ibp->rvp.mkey == smp->mkey)
		valid_mkey = 1;

	/* Unset lease timeout on any valid Get/Set/TrapRepress */
	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
	    (smp->method == IB_MGMT_METHOD_GET ||
	     smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS))
		ibp->rvp.mkey_lease_timeout = 0;

	if (!valid_mkey) {
		switch (smp->method) {
		case IB_MGMT_METHOD_GET:
			/* Bad mkey not a violation below level 2 */
			if (ibp->rvp.mkeyprot < 2)
				break;
			/* fall through */
		case IB_MGMT_METHOD_SET:
		case IB_MGMT_METHOD_TRAP_REPRESS:
			if (ibp->rvp.mkey_violations != 0xFFFF)
				++ibp->rvp.mkey_violations;
			/* Arm the lease timer on the first violation. */
			if (!ibp->rvp.mkey_lease_timeout &&
			    ibp->rvp.mkey_lease_period)
				ibp->rvp.mkey_lease_timeout = jiffies +
					ibp->rvp.mkey_lease_period * HZ;
			/* Generate a trap notice. */
			qib_bad_mkey(ibp, smp);
			ret = 1;
		}
	}

	return ret;
}
453 | ||
/*
 * Subn Get(PortInfo) — assemble the PortInfo attribute for the
 * requested port.  When the attribute modifier addresses a different
 * local port, that port's M_Key is checked first and the MAD fails on
 * an M_Key violation.  Returns an IB_MAD_RESULT_* code via reply().
 */
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port; /* modifier 0 means "this port" */
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			/* Check the target port's M_Key, not ours. */
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret) {
				ret = IB_MAD_RESULT_FAILURE;
				goto bail;
			}
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->rvp.mkey != smp->mkey &&
	      ibp->rvp.mkeyprot == 1))
		pip->mkey = ibp->rvp.mkey;
	pip->gid_prefix = ibp->rvp.gid_prefix;
	pip->lid = cpu_to_be16(ppd->lid);
	pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	/* Map the byte MTU to the IB_MTU_* enum for the wire format. */
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */
	pip->vl_high_limit = ibp->rvp.vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		/* Round-trip latency is reported as a 24-bit value. */
		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}
574 | ||
575 | /** | |
576 | * get_pkeys - return the PKEY table | |
577 | * @dd: the qlogic_ib device | |
578 | * @port: the IB port number | |
579 | * @pkeys: the pkey table is placed here | |
580 | */ | |
581 | static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) | |
582 | { | |
583 | struct qib_pportdata *ppd = dd->pport + port - 1; | |
584 | /* | |
585 | * always a kernel context, no locking needed. | |
586 | * If we get here with ppd setup, no need to check | |
587 | * that pd is valid. | |
588 | */ | |
589 | struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx]; | |
590 | ||
591 | memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys)); | |
592 | ||
593 | return 0; | |
594 | } | |
595 | ||
596 | static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, | |
597 | u8 port) | |
598 | { | |
599 | u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); | |
600 | u16 *p = (u16 *) smp->data; | |
601 | __be16 *q = (__be16 *) smp->data; | |
602 | ||
603 | /* 64 blocks of 32 16-bit P_Key entries */ | |
604 | ||
605 | memset(smp->data, 0, sizeof(smp->data)); | |
606 | if (startpx == 0) { | |
607 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | |
608 | unsigned i, n = qib_get_npkeys(dd); | |
609 | ||
610 | get_pkeys(dd, port, p); | |
611 | ||
612 | for (i = 0; i < n; i++) | |
613 | q[i] = cpu_to_be16(p[i]); | |
614 | } else | |
615 | smp->status |= IB_SMP_INVALID_FIELD; | |
616 | ||
617 | return reply(smp); | |
618 | } | |
619 | ||
620 | static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, | |
621 | u8 port) | |
622 | { | |
623 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | |
624 | u32 startgx = 8 * be32_to_cpu(smp->attr_mod); | |
625 | __be64 *p = (__be64 *) smp->data; | |
626 | unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ | |
627 | ||
628 | /* 32 blocks of 8 64-bit GUIDs per block */ | |
629 | ||
630 | if (startgx == 0 && pidx < dd->num_pports) { | |
631 | struct qib_pportdata *ppd = dd->pport + pidx; | |
632 | struct qib_ibport *ibp = &ppd->ibport_data; | |
633 | unsigned i; | |
634 | ||
635 | /* The first entry is read-only. */ | |
636 | for (i = 1; i < QIB_GUIDS_PER_PORT; i++) | |
637 | ibp->guids[i - 1] = p[i]; | |
638 | } else | |
639 | smp->status |= IB_SMP_INVALID_FIELD; | |
640 | ||
641 | /* The only GUID we support is the first read-only entry. */ | |
642 | return subn_get_guidinfo(smp, ibdev, port); | |
643 | } | |
644 | ||
645 | /** | |
646 | * subn_set_portinfo - set port information | |
647 | * @smp: the incoming SM packet | |
648 | * @ibdev: the infiniband device | |
649 | * @port: the port on the device | |
650 | * | |
651 | * Set Portinfo (see ch. 14.2.5.6). | |
652 | */ | |
653 | static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |
654 | u8 port) | |
655 | { | |
656 | struct ib_port_info *pip = (struct ib_port_info *)smp->data; | |
657 | struct ib_event event; | |
658 | struct qib_devdata *dd; | |
659 | struct qib_pportdata *ppd; | |
660 | struct qib_ibport *ibp; | |
4ccf28a2 | 661 | u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80); |
f931551b RC |
662 | unsigned long flags; |
663 | u16 lid, smlid; | |
664 | u8 lwe; | |
665 | u8 lse; | |
666 | u8 state; | |
667 | u8 vls; | |
668 | u8 msl; | |
669 | u16 lstate; | |
670 | int ret, ore, mtu; | |
671 | u32 port_num = be32_to_cpu(smp->attr_mod); | |
672 | ||
673 | if (port_num == 0) | |
674 | port_num = port; | |
675 | else { | |
676 | if (port_num > ibdev->phys_port_cnt) | |
677 | goto err; | |
678 | /* Port attributes can only be set on the receiving port */ | |
679 | if (port_num != port) | |
680 | goto get_only; | |
681 | } | |
682 | ||
683 | dd = dd_from_ibdev(ibdev); | |
684 | /* IB numbers ports from 1, hdw from 0 */ | |
685 | ppd = dd->pport + (port_num - 1); | |
686 | ibp = &ppd->ibport_data; | |
687 | event.device = ibdev; | |
688 | event.element.port_num = port; | |
689 | ||
f24a6d48 HC |
690 | ibp->rvp.mkey = pip->mkey; |
691 | ibp->rvp.gid_prefix = pip->gid_prefix; | |
692 | ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); | |
f931551b RC |
693 | |
694 | lid = be16_to_cpu(pip->lid); | |
695 | /* Must be a valid unicast LID address. */ | |
9ff198f5 | 696 | if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) |
3c9e5f4d MM |
697 | smp->status |= IB_SMP_INVALID_FIELD; |
698 | else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { | |
f931551b RC |
699 | if (ppd->lid != lid) |
700 | qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); | |
701 | if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) | |
702 | qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT); | |
703 | qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7); | |
704 | event.event = IB_EVENT_LID_CHANGE; | |
705 | ib_dispatch_event(&event); | |
706 | } | |
707 | ||
708 | smlid = be16_to_cpu(pip->sm_lid); | |
709 | msl = pip->neighbormtu_mastersmsl & 0xF; | |
710 | /* Must be a valid unicast LID address. */ | |
9ff198f5 | 711 | if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) |
3c9e5f4d | 712 | smp->status |= IB_SMP_INVALID_FIELD; |
f24a6d48 HC |
713 | else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) { |
714 | spin_lock_irqsave(&ibp->rvp.lock, flags); | |
f931551b | 715 | if (ibp->sm_ah) { |
f24a6d48 | 716 | if (smlid != ibp->rvp.sm_lid) |
f931551b | 717 | ibp->sm_ah->attr.dlid = smlid; |
f24a6d48 | 718 | if (msl != ibp->rvp.sm_sl) |
f931551b RC |
719 | ibp->sm_ah->attr.sl = msl; |
720 | } | |
f24a6d48 HC |
721 | spin_unlock_irqrestore(&ibp->rvp.lock, flags); |
722 | if (smlid != ibp->rvp.sm_lid) | |
723 | ibp->rvp.sm_lid = smlid; | |
724 | if (msl != ibp->rvp.sm_sl) | |
725 | ibp->rvp.sm_sl = msl; | |
f931551b RC |
726 | event.event = IB_EVENT_SM_CHANGE; |
727 | ib_dispatch_event(&event); | |
728 | } | |
729 | ||
730 | /* Allow 1x or 4x to be set (see 14.2.6.6). */ | |
731 | lwe = pip->link_width_enabled; | |
732 | if (lwe) { | |
733 | if (lwe == 0xFF) | |
cc7fb059 | 734 | set_link_width_enabled(ppd, ppd->link_width_supported); |
f931551b | 735 | else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) |
3c9e5f4d MM |
736 | smp->status |= IB_SMP_INVALID_FIELD; |
737 | else if (lwe != ppd->link_width_enabled) | |
738 | set_link_width_enabled(ppd, lwe); | |
f931551b RC |
739 | } |
740 | ||
741 | lse = pip->linkspeedactive_enabled & 0xF; | |
742 | if (lse) { | |
743 | /* | |
744 | * The IB 1.2 spec. only allows link speed values | |
745 | * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific | |
746 | * speeds. | |
747 | */ | |
748 | if (lse == 15) | |
cc7fb059 MH |
749 | set_link_speed_enabled(ppd, |
750 | ppd->link_speed_supported); | |
f931551b | 751 | else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) |
3c9e5f4d MM |
752 | smp->status |= IB_SMP_INVALID_FIELD; |
753 | else if (lse != ppd->link_speed_enabled) | |
754 | set_link_speed_enabled(ppd, lse); | |
f931551b RC |
755 | } |
756 | ||
757 | /* Set link down default state. */ | |
758 | switch (pip->portphysstate_linkdown & 0xF) { | |
759 | case 0: /* NOP */ | |
760 | break; | |
761 | case 1: /* SLEEP */ | |
762 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, | |
763 | IB_LINKINITCMD_SLEEP); | |
764 | break; | |
765 | case 2: /* POLL */ | |
766 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, | |
767 | IB_LINKINITCMD_POLL); | |
768 | break; | |
769 | default: | |
3c9e5f4d | 770 | smp->status |= IB_SMP_INVALID_FIELD; |
f931551b RC |
771 | } |
772 | ||
f24a6d48 HC |
773 | ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6; |
774 | ibp->rvp.vl_high_limit = pip->vl_high_limit; | |
f931551b | 775 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, |
f24a6d48 | 776 | ibp->rvp.vl_high_limit); |
f931551b RC |
777 | |
778 | mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); | |
779 | if (mtu == -1) | |
3c9e5f4d MM |
780 | smp->status |= IB_SMP_INVALID_FIELD; |
781 | else | |
782 | qib_set_mtu(ppd, mtu); | |
f931551b RC |
783 | |
784 | /* Set operational VLs */ | |
785 | vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; | |
786 | if (vls) { | |
787 | if (vls > ppd->vls_supported) | |
3c9e5f4d MM |
788 | smp->status |= IB_SMP_INVALID_FIELD; |
789 | else | |
790 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); | |
f931551b RC |
791 | } |
792 | ||
793 | if (pip->mkey_violations == 0) | |
f24a6d48 | 794 | ibp->rvp.mkey_violations = 0; |
f931551b RC |
795 | |
796 | if (pip->pkey_violations == 0) | |
f24a6d48 | 797 | ibp->rvp.pkey_violations = 0; |
f931551b RC |
798 | |
799 | if (pip->qkey_violations == 0) | |
f24a6d48 | 800 | ibp->rvp.qkey_violations = 0; |
f931551b RC |
801 | |
802 | ore = pip->localphyerrors_overrunerrors; | |
803 | if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) | |
3c9e5f4d | 804 | smp->status |= IB_SMP_INVALID_FIELD; |
f931551b RC |
805 | |
806 | if (set_overrunthreshold(ppd, (ore & 0xF))) | |
3c9e5f4d | 807 | smp->status |= IB_SMP_INVALID_FIELD; |
f931551b | 808 | |
f24a6d48 | 809 | ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; |
f931551b | 810 | |
f931551b RC |
811 | /* |
812 | * Do the port state change now that the other link parameters | |
813 | * have been set. | |
814 | * Changing the port physical state only makes sense if the link | |
815 | * is down or is being set to down. | |
816 | */ | |
817 | state = pip->linkspeed_portstate & 0xF; | |
818 | lstate = (pip->portphysstate_linkdown >> 4) & 0xF; | |
819 | if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) | |
3c9e5f4d | 820 | smp->status |= IB_SMP_INVALID_FIELD; |
f931551b RC |
821 | |
822 | /* | |
823 | * Only state changes of DOWN, ARM, and ACTIVE are valid | |
824 | * and must be in the correct state to take effect (see 7.2.6). | |
825 | */ | |
826 | switch (state) { | |
827 | case IB_PORT_NOP: | |
828 | if (lstate == 0) | |
829 | break; | |
830 | /* FALLTHROUGH */ | |
831 | case IB_PORT_DOWN: | |
832 | if (lstate == 0) | |
833 | lstate = QIB_IB_LINKDOWN_ONLY; | |
834 | else if (lstate == 1) | |
835 | lstate = QIB_IB_LINKDOWN_SLEEP; | |
836 | else if (lstate == 2) | |
837 | lstate = QIB_IB_LINKDOWN; | |
838 | else if (lstate == 3) | |
839 | lstate = QIB_IB_LINKDOWN_DISABLE; | |
3c9e5f4d MM |
840 | else { |
841 | smp->status |= IB_SMP_INVALID_FIELD; | |
842 | break; | |
843 | } | |
f931551b RC |
844 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
845 | ppd->lflags &= ~QIBL_LINKV; | |
846 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | |
847 | qib_set_linkstate(ppd, lstate); | |
848 | /* | |
849 | * Don't send a reply if the response would be sent | |
850 | * through the disabled port. | |
851 | */ | |
852 | if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) { | |
853 | ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; | |
854 | goto done; | |
855 | } | |
856 | qib_wait_linkstate(ppd, QIBL_LINKV, 10); | |
857 | break; | |
858 | case IB_PORT_ARMED: | |
859 | qib_set_linkstate(ppd, QIB_IB_LINKARM); | |
860 | break; | |
861 | case IB_PORT_ACTIVE: | |
862 | qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); | |
863 | break; | |
864 | default: | |
3c9e5f4d | 865 | smp->status |= IB_SMP_INVALID_FIELD; |
f931551b RC |
866 | } |
867 | ||
4ccf28a2 TR |
868 | if (clientrereg) { |
869 | event.event = IB_EVENT_CLIENT_REREGISTER; | |
870 | ib_dispatch_event(&event); | |
871 | } | |
872 | ||
f931551b RC |
873 | ret = subn_get_portinfo(smp, ibdev, port); |
874 | ||
4ccf28a2 TR |
875 | /* restore re-reg bit per o14-12.2.1 */ |
876 | pip->clientrereg_resv_subnetto |= clientrereg; | |
f931551b | 877 | |
cc7fb059 | 878 | goto get_only; |
f931551b RC |
879 | |
880 | err: | |
881 | smp->status |= IB_SMP_INVALID_FIELD; | |
882 | get_only: | |
883 | ret = subn_get_portinfo(smp, ibdev, port); | |
884 | done: | |
885 | return ret; | |
886 | } | |
887 | ||
/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the qlogic_ib port data
 * @key: the PKEY value (not an index) to release
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	int ret;

	/* Find the table slot holding this exact PKEY value. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			/* Last reference dropped: free the slot. */
			ppd->pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		/* Still referenced elsewhere; hardware stays unchanged. */
		break;
	}

	ret = 0;

bail:
	return ret;
}
917 | ||
/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the qlogic_ib port data
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;	/* PKEY without the full-member bit */
	int any = 0;			/* count of candidate empty slots */
	int ret;

	/* The invalid PKEY (all ones in the low 15 bits) is never stored. */
	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	/* Claim an empty slot; a winning ref count of 1 means we own it. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
982 | ||
/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the PKEY table (host byte order)
 *
 * Updates the per-context PKEY table and, if the hardware table changed,
 * pushes the new table to the chip and dispatches IB_EVENT_PKEY_CHANGE.
 *
 * Return: always 0.
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			/* On failure record the slot as empty. */
			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		/* Push the updated PKEY table to the chip. */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.rdi.ibdev;
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}
1039 | ||
1040 | static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, | |
1041 | u8 port) | |
1042 | { | |
1043 | u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); | |
1044 | __be16 *p = (__be16 *) smp->data; | |
1045 | u16 *q = (u16 *) smp->data; | |
1046 | struct qib_devdata *dd = dd_from_ibdev(ibdev); | |
1047 | unsigned i, n = qib_get_npkeys(dd); | |
1048 | ||
1049 | for (i = 0; i < n; i++) | |
1050 | q[i] = be16_to_cpu(p[i]); | |
1051 | ||
1052 | if (startpx != 0 || set_pkeys(dd, port, q) != 0) | |
1053 | smp->status |= IB_SMP_INVALID_FIELD; | |
1054 | ||
1055 | return subn_get_pkeytable(smp, ibdev, port); | |
1056 | } | |
1057 | ||
1058 | static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, | |
1059 | u8 port) | |
1060 | { | |
1061 | struct qib_ibport *ibp = to_iport(ibdev, port); | |
1062 | u8 *p = (u8 *) smp->data; | |
1063 | unsigned i; | |
1064 | ||
1065 | memset(smp->data, 0, sizeof(smp->data)); | |
1066 | ||
f24a6d48 | 1067 | if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) |
f931551b RC |
1068 | smp->status |= IB_SMP_UNSUP_METHOD; |
1069 | else | |
1070 | for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2) | |
1071 | *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1]; | |
1072 | ||
1073 | return reply(smp); | |
1074 | } | |
1075 | ||
1076 | static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, | |
1077 | u8 port) | |
1078 | { | |
1079 | struct qib_ibport *ibp = to_iport(ibdev, port); | |
1080 | u8 *p = (u8 *) smp->data; | |
1081 | unsigned i; | |
1082 | ||
f24a6d48 | 1083 | if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) { |
f931551b RC |
1084 | smp->status |= IB_SMP_UNSUP_METHOD; |
1085 | return reply(smp); | |
1086 | } | |
1087 | ||
1088 | for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) { | |
1089 | ibp->sl_to_vl[i] = *p >> 4; | |
1090 | ibp->sl_to_vl[i + 1] = *p & 0xF; | |
1091 | } | |
1092 | qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)), | |
1093 | _QIB_EVENT_SL2VL_CHANGE_BIT); | |
1094 | ||
1095 | return subn_get_sl_to_vl(smp, ibdev, port); | |
1096 | } | |
1097 | ||
1098 | static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, | |
1099 | u8 port) | |
1100 | { | |
1101 | unsigned which = be32_to_cpu(smp->attr_mod) >> 16; | |
1102 | struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); | |
1103 | ||
1104 | memset(smp->data, 0, sizeof(smp->data)); | |
1105 | ||
1106 | if (ppd->vls_supported == IB_VL_VL0) | |
1107 | smp->status |= IB_SMP_UNSUP_METHOD; | |
1108 | else if (which == IB_VLARB_LOWPRI_0_31) | |
1109 | (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, | |
1110 | smp->data); | |
1111 | else if (which == IB_VLARB_HIGHPRI_0_31) | |
1112 | (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, | |
1113 | smp->data); | |
1114 | else | |
1115 | smp->status |= IB_SMP_INVALID_FIELD; | |
1116 | ||
1117 | return reply(smp); | |
1118 | } | |
1119 | ||
1120 | static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, | |
1121 | u8 port) | |
1122 | { | |
1123 | unsigned which = be32_to_cpu(smp->attr_mod) >> 16; | |
1124 | struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); | |
1125 | ||
1126 | if (ppd->vls_supported == IB_VL_VL0) | |
1127 | smp->status |= IB_SMP_UNSUP_METHOD; | |
1128 | else if (which == IB_VLARB_LOWPRI_0_31) | |
1129 | (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, | |
1130 | smp->data); | |
1131 | else if (which == IB_VLARB_HIGHPRI_0_31) | |
1132 | (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, | |
1133 | smp->data); | |
1134 | else | |
1135 | smp->status |= IB_SMP_INVALID_FIELD; | |
1136 | ||
1137 | return subn_get_vl_arb(smp, ibdev, port); | |
1138 | } | |
1139 | ||
/*
 * subn_trap_repress - handle a SubnTrapRepress() SMP
 * @smp: the trap-repress MAD (unused)
 * @ibdev: the device (unused)
 * @port: the port (unused)
 *
 * Consume the MAD without replying.
 */
static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
1151 | ||
6aea213a | 1152 | static int pma_get_classportinfo(struct ib_pma_mad *pmp, |
f931551b RC |
1153 | struct ib_device *ibdev) |
1154 | { | |
6aea213a OG |
1155 | struct ib_class_port_info *p = |
1156 | (struct ib_class_port_info *)pmp->data; | |
f931551b RC |
1157 | struct qib_devdata *dd = dd_from_ibdev(ibdev); |
1158 | ||
1159 | memset(pmp->data, 0, sizeof(pmp->data)); | |
1160 | ||
6aea213a OG |
1161 | if (pmp->mad_hdr.attr_mod != 0) |
1162 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; | |
f931551b RC |
1163 | |
1164 | /* Note that AllPortSelect is not valid */ | |
1165 | p->base_version = 1; | |
1166 | p->class_version = 1; | |
6aea213a | 1167 | p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; |
f931551b RC |
1168 | /* |
1169 | * Set the most significant bit of CM2 to indicate support for | |
1170 | * congestion statistics | |
1171 | */ | |
1172 | p->reserved[0] = dd->psxmitwait_supported << 7; | |
1173 | /* | |
1174 | * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. | |
1175 | */ | |
1176 | p->resp_time_value = 18; | |
1177 | ||
1178 | return reply((struct ib_smp *) pmp); | |
1179 | } | |
1180 | ||
/*
 * pma_get_portsamplescontrol - handle a PerfGet(PortSamplesControl) MAD
 *
 * Reports the current sampling configuration (tick, status, start,
 * interval, tag, counter selects) for the port. The rvp lock is held
 * while reading the pma_* state so a concurrent Set cannot interleave.
 */
static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	memset(pmp->data, 0, sizeof(pmp->data));

	/* Echo back the requested port; AllPortSelect is not supported. */
	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4; /* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	p->counter_select[0] = ibp->rvp.pma_counter_select[0];
	p->counter_select[1] = ibp->rvp.pma_counter_select[1];
	p->counter_select[2] = ibp->rvp.pma_counter_select[2];
	p->counter_select[3] = ibp->rvp.pma_counter_select[3];
	p->counter_select[4] = ibp->rvp.pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

bail:
	return reply((struct ib_smp *) pmp);
}
1218 | ||
/*
 * pma_set_portsamplescontrol - handle a PerfSet(PortSamplesControl) MAD
 *
 * Takes ownership of the PS* hardware sampling counters away from the
 * congestion timer and, if no sample is currently running, installs the
 * requested start/interval/tag/counter-select values and kicks off a new
 * hardware sample. Replies with the resulting control state via the
 * corresponding Get handler.
 */
static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->rvp.lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	/*
	 * Only reprogram the sampler if it is idle, or if the "running"
	 * sample actually belongs to the congestion timer we just
	 * preempted.
	 */
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->rvp.pma_tag = be16_to_cpu(p->tag);
		ibp->rvp.pma_counter_select[0] = p->counter_select[0];
		ibp->rvp.pma_counter_select[1] = p->counter_select[1];
		ibp->rvp.pma_counter_select[2] = p->counter_select[2];
		ibp->rvp.pma_counter_select[3] = p->counter_select[3];
		ibp->rvp.pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval,
				      ibp->rvp.pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}
1265 | ||
1266 | static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd, | |
1267 | __be16 sel) | |
1268 | { | |
1269 | u64 ret; | |
1270 | ||
1271 | switch (sel) { | |
1272 | case IB_PMA_PORT_XMIT_DATA: | |
1273 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA); | |
1274 | break; | |
1275 | case IB_PMA_PORT_RCV_DATA: | |
1276 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA); | |
1277 | break; | |
1278 | case IB_PMA_PORT_XMIT_PKTS: | |
1279 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS); | |
1280 | break; | |
1281 | case IB_PMA_PORT_RCV_PKTS: | |
1282 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS); | |
1283 | break; | |
1284 | case IB_PMA_PORT_XMIT_WAIT: | |
1285 | ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT); | |
1286 | break; | |
1287 | default: | |
1288 | ret = 0; | |
1289 | } | |
1290 | ||
1291 | return ret; | |
1292 | } | |
1293 | ||
1294 | /* This function assumes that the xmit_wait lock is already held */ | |
1295 | static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd) | |
1296 | { | |
1297 | u32 delta; | |
1298 | ||
1299 | delta = get_counter(&ppd->ibport_data, ppd, | |
1300 | IB_PMA_PORT_XMIT_WAIT); | |
1301 | return ppd->cong_stats.counter + delta; | |
1302 | } | |
1303 | ||
1304 | static void cache_hw_sample_counters(struct qib_pportdata *ppd) | |
1305 | { | |
1306 | struct qib_ibport *ibp = &ppd->ibport_data; | |
1307 | ||
1308 | ppd->cong_stats.counter_cache.psxmitdata = | |
1309 | get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA); | |
1310 | ppd->cong_stats.counter_cache.psrcvdata = | |
1311 | get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA); | |
1312 | ppd->cong_stats.counter_cache.psxmitpkts = | |
1313 | get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS); | |
1314 | ppd->cong_stats.counter_cache.psrcvpkts = | |
1315 | get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS); | |
1316 | ppd->cong_stats.counter_cache.psxmitwait = | |
1317 | get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT); | |
1318 | } | |
1319 | ||
1320 | static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd, | |
1321 | __be16 sel) | |
1322 | { | |
1323 | u64 ret; | |
1324 | ||
1325 | switch (sel) { | |
1326 | case IB_PMA_PORT_XMIT_DATA: | |
1327 | ret = ppd->cong_stats.counter_cache.psxmitdata; | |
1328 | break; | |
1329 | case IB_PMA_PORT_RCV_DATA: | |
1330 | ret = ppd->cong_stats.counter_cache.psrcvdata; | |
1331 | break; | |
1332 | case IB_PMA_PORT_XMIT_PKTS: | |
1333 | ret = ppd->cong_stats.counter_cache.psxmitpkts; | |
1334 | break; | |
1335 | case IB_PMA_PORT_RCV_PKTS: | |
1336 | ret = ppd->cong_stats.counter_cache.psrcvpkts; | |
1337 | break; | |
1338 | case IB_PMA_PORT_XMIT_WAIT: | |
1339 | ret = ppd->cong_stats.counter_cache.psxmitwait; | |
1340 | break; | |
1341 | default: | |
1342 | ret = 0; | |
1343 | } | |
1344 | ||
1345 | return ret; | |
1346 | } | |
1347 | ||
/*
 * pma_get_portsamplesresult - handle a PerfGet(PortSamplesResult) MAD
 *
 * Reports the selected 32-bit sample counters. If a PMA-owned sample has
 * completed, the hardware counters are cached, the xmit-wait total is
 * rolled into the running counter, and the sampler is handed back to the
 * congestion timer. All state is read/modified under the rvp lock.
 */
static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	/* If the timer owns the sampler, there is no PMA sample pending. */
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			/* Hand the sampler back to the congestion timer. */
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->rvp.pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return reply((struct ib_smp *) pmp);
}
1386 | ||
/*
 * pma_get_portsamplesresult_ext - handle PerfGet(PortSamplesResultExt)
 *
 * Same state machine as pma_get_portsamplesresult(), but reports 64-bit
 * counter values and flags the extended width in the reply.
 */
static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->rvp.lock, flags);
	p->tag = cpu_to_be16(ibp->rvp.pma_tag);
	/* If the timer owns the sampler, there is no PMA sample pending. */
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		/* 64 bits */
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			/* Hand the sampler back to the congestion timer. */
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->rvp.pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->rvp.lock, flags);

	return reply((struct ib_smp *) pmp);
}
1428 | ||
6aea213a | 1429 | static int pma_get_portcounters(struct ib_pma_mad *pmp, |
f931551b RC |
1430 | struct ib_device *ibdev, u8 port) |
1431 | { | |
1432 | struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) | |
1433 | pmp->data; | |
1434 | struct qib_ibport *ibp = to_iport(ibdev, port); | |
1435 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | |
1436 | struct qib_verbs_counters cntrs; | |
1437 | u8 port_select = p->port_select; | |
1438 | ||
1439 | qib_get_counters(ppd, &cntrs); | |
1440 | ||
1441 | /* Adjust counters for any resets done. */ | |
1442 | cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; | |
1443 | cntrs.link_error_recovery_counter -= | |
1444 | ibp->z_link_error_recovery_counter; | |
1445 | cntrs.link_downed_counter -= ibp->z_link_downed_counter; | |
1446 | cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; | |
1447 | cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors; | |
1448 | cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; | |
1449 | cntrs.port_xmit_data -= ibp->z_port_xmit_data; | |
1450 | cntrs.port_rcv_data -= ibp->z_port_rcv_data; | |
1451 | cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; | |
1452 | cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; | |
1453 | cntrs.local_link_integrity_errors -= | |
1454 | ibp->z_local_link_integrity_errors; | |
1455 | cntrs.excessive_buffer_overrun_errors -= | |
1456 | ibp->z_excessive_buffer_overrun_errors; | |
1457 | cntrs.vl15_dropped -= ibp->z_vl15_dropped; | |
f24a6d48 | 1458 | cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped; |
f931551b RC |
1459 | |
1460 | memset(pmp->data, 0, sizeof(pmp->data)); | |
1461 | ||
1462 | p->port_select = port_select; | |
6aea213a OG |
1463 | if (pmp->mad_hdr.attr_mod != 0 || port_select != port) |
1464 | pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; | |
f931551b RC |
1465 | |
1466 | if (cntrs.symbol_error_counter > 0xFFFFUL) | |
1467 | p->symbol_error_counter = cpu_to_be16(0xFFFF); | |
1468 | else | |
1469 | p->symbol_error_counter = | |
1470 | cpu_to_be16((u16)cntrs.symbol_error_counter); | |
1471 | if (cntrs.link_error_recovery_counter > 0xFFUL) | |
1472 | p->link_error_recovery_counter = 0xFF; | |
1473 | else | |
1474 | p->link_error_recovery_counter = | |
1475 | (u8)cntrs.link_error_recovery_counter; | |
1476 | if (cntrs.link_downed_counter > 0xFFUL) | |
1477 | p->link_downed_counter = 0xFF; | |
1478 | else | |
1479 | p->link_downed_counter = (u8)cntrs.link_downed_counter; | |
1480 | if (cntrs.port_rcv_errors > 0xFFFFUL) | |
1481 | p->port_rcv_errors = cpu_to_be16(0xFFFF); | |
1482 | else | |
1483 | p->port_rcv_errors = | |
1484 | cpu_to_be16((u16) cntrs.port_rcv_errors); | |
1485 | if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) | |
1486 | p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); | |
1487 | else | |
1488 | p->port_rcv_remphys_errors = | |
1489 | cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); | |
1490 | if (cntrs.port_xmit_discards > 0xFFFFUL) | |
1491 | p->port_xmit_discards = cpu_to_be16(0xFFFF); | |
1492 | else | |
1493 | p->port_xmit_discards = | |
1494 | cpu_to_be16((u16)cntrs.port_xmit_discards); | |
1495 | if (cntrs.local_link_integrity_errors > 0xFUL) | |
1496 | cntrs.local_link_integrity_errors = 0xFUL; | |
1497 | if (cntrs.excessive_buffer_overrun_errors > 0xFUL) | |
1498 | cntrs.excessive_buffer_overrun_errors = 0xFUL; | |
6aea213a | 1499 | p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) | |
f931551b RC |
1500 | cntrs.excessive_buffer_overrun_errors; |
1501 | if (cntrs.vl15_dropped > 0xFFFFUL) | |
1502 | p->vl15_dropped = cpu_to_be16(0xFFFF); | |
1503 | else | |
1504 | p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); | |
1505 | if (cntrs.port_xmit_data > 0xFFFFFFFFUL) | |
1506 | p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); | |
1507 | else | |
1508 | p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); | |
1509 | if (cntrs.port_rcv_data > 0xFFFFFFFFUL) | |
1510 | p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); | |
1511 | else | |
1512 | p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); | |
1513 | if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) | |
1514 | p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); | |
1515 | else | |
1516 | p->port_xmit_packets = | |
1517 | cpu_to_be32((u32)cntrs.port_xmit_packets); | |
1518 | if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) | |
1519 | p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); | |
1520 | else | |
1521 | p->port_rcv_packets = | |
1522 | cpu_to_be32((u32) cntrs.port_rcv_packets); | |
1523 | ||
1524 | return reply((struct ib_smp *) pmp); | |
1525 | } | |
1526 | ||
/*
 * pma_get_portcounters_cong - handle the vendor congestion-counters MAD
 *
 * Like pma_get_portcounters(), but reports the 64-bit congestion layout
 * (which starts at offset 24 rather than 64) including PortXmitWait and
 * the xmit-wait sampling check rate. Requires PSXmitWait support.
 */
static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	/* Port select lives in the low byte of the attribute modifier. */
	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	/* The xmit-wait delta is protected by the rvp lock. */
	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
	       sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	/* Counters below saturate at the width of their wire fields. */
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16(
				(u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16(
				(u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	/* Two 4-bit fields packed into one byte. */
	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}
1639 | ||
7d7632ad MM |
1640 | static void qib_snapshot_pmacounters( |
1641 | struct qib_ibport *ibp, | |
1642 | struct qib_pma_counters *pmacounters) | |
1643 | { | |
1644 | struct qib_pma_counters *p; | |
1645 | int cpu; | |
1646 | ||
1647 | memset(pmacounters, 0, sizeof(*pmacounters)); | |
1648 | for_each_possible_cpu(cpu) { | |
1649 | p = per_cpu_ptr(ibp->pmastats, cpu); | |
1650 | pmacounters->n_unicast_xmit += p->n_unicast_xmit; | |
1651 | pmacounters->n_unicast_rcv += p->n_unicast_rcv; | |
1652 | pmacounters->n_multicast_xmit += p->n_multicast_xmit; | |
1653 | pmacounters->n_multicast_rcv += p->n_multicast_rcv; | |
1654 | } | |
1655 | } | |
1656 | ||
/*
 * PerfMgt PortCountersExt (GET): report the 64-bit data/packet
 * counters for @port.  Each value is adjusted by the "zero" snapshot
 * saved at the last SET so the counters appear to have been cleared.
 */
static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;
	u8 port_select = p->port_select;

	/* Clear the reply payload but keep the requested port selector. */
	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	/* Adjust counters for any resets done. */
	swords -= ibp->z_port_xmit_data;
	rwords -= ibp->z_port_rcv_data;
	spkts -= ibp->z_port_xmit_packets;
	rpkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(swords);
	p->port_rcv_data = cpu_to_be64(rwords);
	p->port_xmit_packets = cpu_to_be64(spkts);
	p->port_rcv_packets = cpu_to_be64(rpkts);

	/* Unicast/multicast totals come from the per-CPU PMA counters. */
	qib_snapshot_pmacounters(ibp, &pma);

	p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit
		- ibp->z_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv
		- ibp->z_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit
		- ibp->z_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv
		- ibp->z_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}
1703 | ||
/*
 * PerfMgt PortCounters (SET): the hardware cannot clear its counters,
 * so a "clear" is emulated by snapshotting the current value of every
 * counter selected in counter_select; the GET path subtracts these
 * snapshots.  Replies with the post-clear values via the GET handler.
 */
static int pma_set_portcounters(struct ib_pma_mad *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		/* Also reset the software-maintained VL15 drop count. */
		ibp->rvp.n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	/* Reply with the freshly "cleared" counter values. */
	return pma_get_portcounters(pmp, ibdev, port);
}
1766 | ||
/*
 * PerfMgt PortCountersCong (SET): for this vendor attribute the
 * counter-select bits live in the top byte of attr_mod.  The reply
 * carries the pre-clear values (fetched via the GET handler first);
 * the selected counters are then snapshotted to emulate clearing.
 */
static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

	qib_get_counters(ppd, &cntrs);
	/* Get counter values before we save them */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		/* Restart the xmit-wait sampling under the port lock. */
		spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		/* Snapshot every error counter so future GETs read as zero. */
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->rvp.n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	return ret;
}
1817 | ||
/*
 * PerfMgt PortCountersExt (SET): emulate clearing the selected 64-bit
 * counters by snapshotting their current values (the hardware cannot
 * clear them), then reply with the result of the extended GET.
 */
static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 swords, rwords, spkts, rpkts, xwait;
	struct qib_pma_counters pma;

	qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = swords;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rwords;

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = spkts;

	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rpkts;

	/* Unicast/multicast totals come from the per-CPU PMA counters. */
	qib_snapshot_pmacounters(ibp, &pma);

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->z_unicast_xmit = pma.n_unicast_xmit;

	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->z_unicast_rcv = pma.n_unicast_rcv;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->z_multicast_xmit = pma.n_multicast_xmit;

	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->z_multicast_rcv = pma.n_multicast_rcv;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}
1858 | ||
/*
 * Dispatch a Subnet Management class MAD (LID-routed or directed-route)
 * to the matching subn_get_/subn_set_ attribute handler, after checking
 * the class version and validating the M_Key.
 */
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int ret;

	/* Build the reply in place on a copy of the request. */
	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	ret = check_mkey(ibp, smp, mad_flags);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * If this is a get/set portinfo, we already check the
		 * M_Key if the MAD is for another port and the M_Key
		 * is OK on the receiving port. This check is needed
		 * to increment the error counters when the M_Key
		 * fails to match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
		ret = IB_MAD_RESULT_FAILURE;
		goto bail;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_get_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_get_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_get_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_get_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_get_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			/* Consume SMInfo when the local SM is disabled. */
			if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			/* Pass it on normally when a local SM is running. */
			if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_set_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_set_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_set_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_set_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_set_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			/* Same SMInfo handling as the GET path above. */
			if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->rvp.port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
			ret = subn_trap_repress(smp, ibdev, port);
		else {
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
		}
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_SEND:
		if (ib_get_smp_direction(smp) &&
		    smp->attr_id == QIB_VENDOR_IPG) {
			/* Vendor SMP: program the inter-packet gap. */
			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
					      smp->data[0]);
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		} else
			ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
2010 | ||
/*
 * Dispatch a Performance Management class MAD to the matching
 * pma_get_ or pma_set_ attribute handler.  The reply is built in
 * place on a copy of the request.
 */
static int process_perf(struct ib_device *ibdev, u8 port,
			const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
	int ret;

	*out_mad = *in_mad;
	if (pmp->mad_hdr.class_version != 1) {
		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->mad_hdr.method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->mad_hdr.attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}
2093 | ||
36a8f01c MM |
2094 | static int cc_get_classportinfo(struct ib_cc_mad *ccp, |
2095 | struct ib_device *ibdev) | |
2096 | { | |
2097 | struct ib_cc_classportinfo_attr *p = | |
2098 | (struct ib_cc_classportinfo_attr *)ccp->mgmt_data; | |
2099 | ||
2100 | memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); | |
2101 | ||
2102 | p->base_version = 1; | |
2103 | p->class_version = 1; | |
2104 | p->cap_mask = 0; | |
2105 | ||
2106 | /* | |
2107 | * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. | |
2108 | */ | |
2109 | p->resp_time_value = 18; | |
2110 | ||
2111 | return reply((struct ib_smp *) ccp); | |
2112 | } | |
2113 | ||
/*
 * CC mgmt CongestionInfo (GET): report this port's congestion-control
 * table capacity (no optional congestion-info flags are advertised).
 */
static int cc_get_congestion_info(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_info_attr *p =
		(struct ib_cc_info_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	p->congestion_info = 0;
	p->control_table_cap = ppd->cc_max_table_entries;

	return reply((struct ib_smp *) ccp);
}
2129 | ||
/*
 * CC mgmt CongestionSetting (GET): copy the shadow (last committed)
 * per-SL congestion settings into the reply.  The shadow is read
 * under cc_shadow_lock so a concurrent SET cannot tear the snapshot.
 */
static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	int i;
	struct ib_cc_congestion_setting_attr *p =
		(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct ib_cc_congestion_entry_shadow *entries;

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	spin_lock(&ppd->cc_shadow_lock);

	entries = ppd->congestion_entries_shadow->entries;
	p->port_control = cpu_to_be16(
		ppd->congestion_entries_shadow->port_control);
	p->control_map = cpu_to_be16(
		ppd->congestion_entries_shadow->control_map);
	for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
		p->entries[i].ccti_increase = entries[i].ccti_increase;
		p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
		p->entries[i].trigger_threshold = entries[i].trigger_threshold;
		p->entries[i].ccti_min = entries[i].ccti_min;
	}

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);
}
2160 | ||
/*
 * CC mgmt CongestionControlTable (GET): return one IB_CCT_ENTRIES-sized
 * block of the shadow CCT, selected by attr_mod.  The MAD is failed if
 * the block index exceeds the supported capacity or the portion of the
 * table currently populated.
 */
static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_table_attr *p =
		(struct ib_cc_table_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
	u32 max_cct_block;
	u32 cct_entry;
	struct ib_cc_table_entry_shadow *entries;
	int i;

	/* Is the table index more than what is supported? */
	if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
		goto bail;

	memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));

	spin_lock(&ppd->cc_shadow_lock);

	/* Highest block index that currently holds valid entries. */
	max_cct_block =
		(ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
	max_cct_block = max_cct_block ? max_cct_block - 1 : 0;

	if (cct_block_index > max_cct_block) {
		spin_unlock(&ppd->cc_shadow_lock);
		goto bail;
	}

	ccp->attr_mod = cpu_to_be32(cct_block_index);

	/* Absolute index of the last entry covered by this block. */
	cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);

	cct_entry--;

	p->ccti_limit = cpu_to_be16(cct_entry);

	entries = &ppd->ccti_entries_shadow->
			entries[IB_CCT_ENTRIES * cct_block_index];
	/* Reduce to an offset within the block for the copy loop. */
	cct_entry %= IB_CCT_ENTRIES;

	for (i = 0; i <= cct_entry; i++)
		p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);

bail:
	return reply_failure((struct ib_smp *) ccp);
}
2213 | ||
/*
 * CC mgmt CongestionSetting (SET): copy the per-SL congestion-control
 * parameters from the MAD into the port's working table.  (The shadow
 * copy used by GET is refreshed when the CCT is committed, not here.)
 */
static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_congestion_setting_attr *p =
		(struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int i;

	ppd->cc_sl_control_map = be16_to_cpu(p->control_map);

	for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
		ppd->congestion_entries[i].ccti_increase =
			p->entries[i].ccti_increase;

		ppd->congestion_entries[i].ccti_timer =
			be16_to_cpu(p->entries[i].ccti_timer);

		ppd->congestion_entries[i].trigger_threshold =
			p->entries[i].trigger_threshold;

		ppd->congestion_entries[i].ccti_min =
			p->entries[i].ccti_min;
	}

	return reply((struct ib_smp *) ccp);
}
2241 | ||
/*
 * CC mgmt CongestionControlTable (SET): accept one block of CCT
 * entries (selected by attr_mod), accumulate it into the working
 * table, then commit working table + congestion settings to the
 * shadow copies under cc_shadow_lock.  Fails the MAD on an
 * out-of-range block index or table overflow.
 */
static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_cc_table_attr *p =
		(struct ib_cc_table_attr *)ccp->mgmt_data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
	u32 cct_entry;
	struct ib_cc_table_entry_shadow *entries;
	int i;

	/* Is the table index more than what is supported? */
	if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
		goto bail;

	/* If this packet is the first in the sequence then
	 * zero the total table entry count.
	 */
	if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
		ppd->total_cct_entry = 0;

	/* Number of valid entries in this block, minus one. */
	cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;

	/* ccti_limit is 0 to 63 */
	ppd->total_cct_entry += (cct_entry + 1);

	if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
		goto bail;

	ppd->ccti_limit = be16_to_cpu(p->ccti_limit);

	entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);

	for (i = 0; i <= cct_entry; i++)
		entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);

	/* Publish the accumulated table to the shadow used by GET. */
	spin_lock(&ppd->cc_shadow_lock);

	ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
	memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
		(ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));

	ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
	ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
	memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
		IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));

	spin_unlock(&ppd->cc_shadow_lock);

	return reply((struct ib_smp *) ccp);

bail:
	return reply_failure((struct ib_smp *) ccp);
}
2297 | ||
/*
 * CC_Key validation is not implemented: every congestion-control MAD
 * is accepted regardless of the key it carries.
 */
static int check_cc_key(struct qib_ibport *ibp,
			struct ib_cc_mad *ccp, int mad_flags)
{
	return 0;
}
2303 | ||
/*
 * Dispatch a Congestion Control class MAD (class version 2) to the
 * matching cc_get_ or cc_set_ attribute handler.  The reply is built
 * in place on a copy of the request.
 */
static int process_cc(struct ib_device *ibdev, int mad_flags,
			u8 port, const struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	int ret;

	*out_mad = *in_mad;

	if (ccp->class_version != 2) {
		ccp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *)ccp);
		goto bail;
	}

	/* check_cc_key() currently always succeeds. */
	ret = check_cc_key(ibp, ccp, mad_flags);
	if (ret)
		goto bail;

	switch (ccp->method) {
	case IB_MGMT_METHOD_GET:
		switch (ccp->attr_id) {
		case IB_CC_ATTR_CLASSPORTINFO:
			ret = cc_get_classportinfo(ccp, ibdev);
			goto bail;

		case IB_CC_ATTR_CONGESTION_INFO:
			ret = cc_get_congestion_info(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CA_CONGESTION_SETTING:
			ret = cc_get_congestion_setting(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
			ret = cc_get_congestion_control_table(ccp, ibdev, port);
			goto bail;

		default:
			ccp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) ccp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (ccp->attr_id) {
		case IB_CC_ATTR_CA_CONGESTION_SETTING:
			ret = cc_set_congestion_setting(ccp, ibdev, port);
			goto bail;

		case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
			ret = cc_set_congestion_control_table(ccp, ibdev, port);
			goto bail;

		default:
			ccp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) ccp);
			goto bail;
		}

	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	default:
		ccp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) ccp);
	}

bail:
	return ret;
}
2385 | ||
f931551b RC |
/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in: the incoming MAD header
 * @in_mad_size: size of the incoming MAD; must be sizeof(struct ib_mad)
 * @out: buffer for any outgoing MAD reply
 * @out_mad_size: size of the reply buffer; must be sizeof(struct ib_mad)
 *	on entry
 * @out_mad_pkey_index: P_Key index for the reply (not written by this
 *	driver)
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		    const struct ib_mad_hdr *in, size_t in_mad_size,
		    struct ib_mad_hdr *out, size_t *out_mad_size,
		    u16 *out_mad_pkey_index)
{
	int ret;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	/* This driver only handles fixed-size (non-jumbo) MADs. */
	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_PERF_MGMT:
		ret = process_perf(ibdev, port, in_mad, out_mad);
		goto bail;

	case IB_MGMT_CLASS_CONG_MGMT:
		/* Ignore CC MADs when congestion control is not configured. */
		if (!ppd->congestion_entries_shadow ||
			 !qib_cc_table_size) {
			ret = IB_MAD_RESULT_SUCCESS;
			goto bail;
		}
		ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
		goto bail;

	default:
		ret = IB_MAD_RESULT_SUCCESS;
	}

bail:
	return ret;
}
2447 | ||
/* MAD agent send-completion callback: just release the send buffer. */
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}
2453 | ||
/*
 * Per-port timer callback (re-armed every HZ jiffies): when a hardware
 * xmit-wait sample has completed, cache the sampled counters and
 * accumulate the xmit-wait delta, then start the next sample interval.
 * All state is touched under the port's rvp.lock.
 */
static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			/* Sample still in progress; try again next tick. */
			goto done;
	}
	ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
	dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
done:
	spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags);
	/* Re-arm: fire again in roughly one second. */
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}
2477 | ||
2478 | int qib_create_agents(struct qib_ibdev *dev) | |
2479 | { | |
2480 | struct qib_devdata *dd = dd_from_dev(dev); | |
2481 | struct ib_mad_agent *agent; | |
2482 | struct qib_ibport *ibp; | |
2483 | int p; | |
2484 | int ret; | |
2485 | ||
2486 | for (p = 0; p < dd->num_pports; p++) { | |
2487 | ibp = &dd->pport[p].ibport_data; | |
2dc05ab5 DD |
2488 | agent = ib_register_mad_agent(&dev->rdi.ibdev, p + 1, |
2489 | IB_QPT_SMI, | |
f931551b | 2490 | NULL, 0, send_handler, |
0f29b46d | 2491 | NULL, NULL, 0); |
f931551b RC |
2492 | if (IS_ERR(agent)) { |
2493 | ret = PTR_ERR(agent); | |
2494 | goto err; | |
2495 | } | |
2496 | ||
2497 | /* Initialize xmit_wait structure */ | |
2498 | dd->pport[p].cong_stats.counter = 0; | |
2499 | init_timer(&dd->pport[p].cong_stats.timer); | |
2500 | dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func; | |
2501 | dd->pport[p].cong_stats.timer.data = | |
2502 | (unsigned long)(&dd->pport[p]); | |
2503 | dd->pport[p].cong_stats.timer.expires = 0; | |
2504 | add_timer(&dd->pport[p].cong_stats.timer); | |
2505 | ||
f24a6d48 | 2506 | ibp->rvp.send_agent = agent; |
f931551b RC |
2507 | } |
2508 | ||
2509 | return 0; | |
2510 | ||
2511 | err: | |
2512 | for (p = 0; p < dd->num_pports; p++) { | |
2513 | ibp = &dd->pport[p].ibport_data; | |
f24a6d48 HC |
2514 | if (ibp->rvp.send_agent) { |
2515 | agent = ibp->rvp.send_agent; | |
2516 | ibp->rvp.send_agent = NULL; | |
f931551b RC |
2517 | ib_unregister_mad_agent(agent); |
2518 | } | |
2519 | } | |
2520 | ||
2521 | return ret; | |
2522 | } | |
2523 | ||
2524 | void qib_free_agents(struct qib_ibdev *dev) | |
2525 | { | |
2526 | struct qib_devdata *dd = dd_from_dev(dev); | |
2527 | struct ib_mad_agent *agent; | |
2528 | struct qib_ibport *ibp; | |
2529 | int p; | |
2530 | ||
2531 | for (p = 0; p < dd->num_pports; p++) { | |
2532 | ibp = &dd->pport[p].ibport_data; | |
f24a6d48 HC |
2533 | if (ibp->rvp.send_agent) { |
2534 | agent = ibp->rvp.send_agent; | |
2535 | ibp->rvp.send_agent = NULL; | |
f931551b RC |
2536 | ib_unregister_mad_agent(agent); |
2537 | } | |
2538 | if (ibp->sm_ah) { | |
2539 | ib_destroy_ah(&ibp->sm_ah->ibah); | |
2540 | ibp->sm_ah = NULL; | |
2541 | } | |
2542 | if (dd->pport[p].cong_stats.timer.data) | |
2543 | del_timer_sync(&dd->pport[p].cong_stats.timer); | |
2544 | } | |
2545 | } |