drivers/infiniband/core/mad.c
/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("kernel IB MAD API");
MODULE_AUTHOR("Hal Rosenstock");
MODULE_AUTHOR("Sean Hefty");

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

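/*
 * Illustrative note (not in the original source): both queue depths are
 * load-time module parameters with mode 0444, i.e. visible but not
 * writable through sysfs once loaded. Assuming the module is built and
 * loaded as ib_mad, they could be set, for example, with:
 *
 *	modprobe ib_mad send_queue_size=256 recv_queue_size=1024
 *
 * The defaults IB_MAD_QP_SEND_SIZE and IB_MAD_QP_RECV_SIZE come from
 * mad_priv.h.
 */
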
static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

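/*
 * Illustrative sketch (not in the original source): a minimal client
 * registration on the GSI QP for the Performance Management class. The
 * handler names and context are placeholders; error handling beyond the
 * IS_ERR() check is elided.
 *
 *	struct ib_mad_reg_req reg_req = {
 *		.mgmt_class	    = IB_MGMT_CLASS_PERF_MGMT,
 *		.mgmt_class_version = 1,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &reg_req, 0, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */
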
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
		u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

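/*
 * Worked example (illustrative, not in the original source): for an IB SA
 * MAD, mad_size = sizeof(struct ib_mad) = 256 and hdr_len = IB_MGMT_SA_HDR
 * = 56, so seg_size = 200. With data_len = 500, 500 % 200 = 100 and
 * pad = 200 - 100 = 100, i.e. the final RMPP segment is zero-padded out
 * to a full segment. When data_len is an exact multiple of seg_size, pad
 * would compute to seg_size and is folded back to 0.
 */
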
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

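/*
 * Illustrative sketch (not in the original source): allocating a plain
 * (non-RMPP) IB MAD send buffer from a registered agent. "agent" and "ah"
 * are assumed to exist already; IB_MGMT_MAD_HDR and IB_MGMT_MAD_DATA come
 * from rdma/ib_mad.h and together fill one 256-byte MAD.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	... fill in msg->mad ...
 */
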
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

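/*
 * Illustrative sketch (not in the original source): posting the buffer
 * built in the ib_create_send_mad() example above. The send completes
 * asynchronously through the agent's send_handler, which typically calls
 * ib_free_send_mad() on the completed buffer; on a synchronous post
 * failure the caller frees it directly.
 *
 *	ret = ib_post_send_mad(msg, NULL);
 *	if (ret) {
 *		ib_free_send_mad(msg);
 *		return ret;
 *	}
 */
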
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 * a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

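/*
 * Illustrative sketch (not in the original source): a receive handler of
 * the form registered with ib_register_mad_agent(). Once the client is
 * done with the received MAD (including any RMPP segments chained on
 * rmpp_list), it hands the buffers back with ib_free_recv_mad().
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_buf *send_buf,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		... examine mad_recv_wc->recv_buf.mad ...
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */
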
1da177e4
LT
1332struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1333 u8 rmpp_version,
1334 ib_mad_send_handler send_handler,
1335 ib_mad_recv_handler recv_handler,
1336 void *context)
1337{
1338 return ERR_PTR(-EINVAL); /* XXX: for now */
1339}
1340EXPORT_SYMBOL(ib_redirect_mad_qp);
1341
1342int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1343 struct ib_wc *wc)
1344{
7ef5d4b0
IW
1345 dev_err(&mad_agent->device->dev,
1346 "ib_process_mad_wc() not implemented yet\n");
1da177e4
LT
1347 return 0;
1348}
1349EXPORT_SYMBOL(ib_process_mad_wc);
1350
1351static int method_in_use(struct ib_mad_mgmt_method_table **method,
1352 struct ib_mad_reg_req *mad_reg_req)
1353{
1354 int i;
1355
19b629f5 1356 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1da177e4 1357 if ((*method)->agent[i]) {
7ef5d4b0 1358 pr_err("Method %d already in use\n", i);
1da177e4
LT
1359 return -EINVAL;
1360 }
1361 }
1362 return 0;
1363}
1364
1365static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1366{
1367 /* Allocate management method table */
de6eb66b 1368 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1da177e4 1369 if (!*method) {
7ef5d4b0 1370 pr_err("No memory for ib_mad_mgmt_method_table\n");
1da177e4
LT
1371 return -ENOMEM;
1372 }
1da177e4
LT
1373
1374 return 0;
1375}
1376
1377/*
1378 * Check to see if there are any methods still in use
1379 */
1380static int check_method_table(struct ib_mad_mgmt_method_table *method)
1381{
1382 int i;
1383
1384 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1385 if (method->agent[i])
1386 return 1;
1387 return 0;
1388}
1389
1390/*
1391 * Check to see if there are any method tables for this class still in use
1392 */
1393static int check_class_table(struct ib_mad_mgmt_class_table *class)
1394{
1395 int i;
1396
1397 for (i = 0; i < MAX_MGMT_CLASS; i++)
1398 if (class->method_table[i])
1399 return 1;
1400 return 0;
1401}
1402
1403static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1404{
1405 int i;
1406
1407 for (i = 0; i < MAX_MGMT_OUI; i++)
1408 if (vendor_class->method_table[i])
1409 return 1;
1410 return 0;
1411}
1412
1413static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
d94bd266 1414 const char *oui)
1da177e4
LT
1415{
1416 int i;
1417
1418 for (i = 0; i < MAX_MGMT_OUI; i++)
3cd96564
RD
1419 /* Is there matching OUI for this vendor class ? */
1420 if (!memcmp(vendor_class->oui[i], oui, 3))
1da177e4
LT
1421 return i;
1422
1423 return -1;
1424}
1425
1426static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1427{
1428 int i;
1429
1430 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1431 if (vendor->vendor_class[i])
1432 return 1;
1433
1434 return 0;
1435}
1436
1437static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1438 struct ib_mad_agent_private *agent)
1439{
1440 int i;
1441
1442 /* Remove any methods for this mad agent */
1443 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1444 if (method->agent[i] == agent) {
1445 method->agent[i] = NULL;
1446 }
1447 }
1448}
1449
1450static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1451 struct ib_mad_agent_private *agent_priv,
1452 u8 mgmt_class)
1453{
1454 struct ib_mad_port_private *port_priv;
1455 struct ib_mad_mgmt_class_table **class;
1456 struct ib_mad_mgmt_method_table **method;
1457 int i, ret;
1458
1459 port_priv = agent_priv->qp_info->port_priv;
1460 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1461 if (!*class) {
1462 /* Allocate management class table for "new" class version */
de6eb66b 1463 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1da177e4 1464 if (!*class) {
7ef5d4b0
IW
1465 dev_err(&agent_priv->agent.device->dev,
1466 "No memory for ib_mad_mgmt_class_table\n");
1da177e4
LT
1467 ret = -ENOMEM;
1468 goto error1;
1469 }
de6eb66b 1470
1da177e4
LT
1471 /* Allocate method table for this management class */
1472 method = &(*class)->method_table[mgmt_class];
1473 if ((ret = allocate_method_table(method)))
1474 goto error2;
1475 } else {
1476 method = &(*class)->method_table[mgmt_class];
1477 if (!*method) {
1478 /* Allocate method table for this management class */
1479 if ((ret = allocate_method_table(method)))
1480 goto error1;
1481 }
1482 }
1483
1484 /* Now, make sure methods are not already in use */
1485 if (method_in_use(method, mad_reg_req))
1486 goto error3;
1487
1488 /* Finally, add in methods being registered */
19b629f5 1489 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1da177e4 1490 (*method)->agent[i] = agent_priv;
19b629f5 1491
1da177e4
LT
1492 return 0;
1493
1494error3:
1495 /* Remove any methods for this mad agent */
1496 remove_methods_mad_agent(*method, agent_priv);
1497 /* Now, check to see if there are any methods in use */
1498 if (!check_method_table(*method)) {
1499 /* If not, release management method table */
1500 kfree(*method);
1501 *method = NULL;
1502 }
1503 ret = -EINVAL;
1504 goto error1;
1505error2:
1506 kfree(*class);
1507 *class = NULL;
1508error1:
1509 return ret;
1510}
1511
1512static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1513 struct ib_mad_agent_private *agent_priv)
1514{
1515 struct ib_mad_port_private *port_priv;
1516 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1517 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1518 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1519 struct ib_mad_mgmt_method_table **method;
1520 int i, ret = -ENOMEM;
1521 u8 vclass;
1522
1523 /* "New" vendor (with OUI) class */
1524 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1525 port_priv = agent_priv->qp_info->port_priv;
1526 vendor_table = &port_priv->version[
1527 mad_reg_req->mgmt_class_version].vendor;
1528 if (!*vendor_table) {
1529 /* Allocate mgmt vendor class table for "new" class version */
de6eb66b 1530 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1da177e4 1531 if (!vendor) {
7ef5d4b0
IW
1532 dev_err(&agent_priv->agent.device->dev,
1533 "No memory for ib_mad_mgmt_vendor_class_table\n");
1da177e4
LT
1534 goto error1;
1535 }
de6eb66b 1536
1da177e4
LT
1537 *vendor_table = vendor;
1538 }
1539 if (!(*vendor_table)->vendor_class[vclass]) {
1540 /* Allocate table for this management vendor class */
de6eb66b 1541 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1da177e4 1542 if (!vendor_class) {
7ef5d4b0
IW
1543 dev_err(&agent_priv->agent.device->dev,
1544 "No memory for ib_mad_mgmt_vendor_class\n");
1da177e4
LT
1545 goto error2;
1546 }
de6eb66b 1547
1da177e4
LT
1548 (*vendor_table)->vendor_class[vclass] = vendor_class;
1549 }
1550 for (i = 0; i < MAX_MGMT_OUI; i++) {
1551 /* Is there matching OUI for this vendor class ? */
1552 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1553 mad_reg_req->oui, 3)) {
1554 method = &(*vendor_table)->vendor_class[
1555 vclass]->method_table[i];
1556 BUG_ON(!*method);
1557 goto check_in_use;
1558 }
1559 }
1560 for (i = 0; i < MAX_MGMT_OUI; i++) {
1561 /* OUI slot available ? */
1562 if (!is_vendor_oui((*vendor_table)->vendor_class[
1563 vclass]->oui[i])) {
1564 method = &(*vendor_table)->vendor_class[
1565 vclass]->method_table[i];
1566 BUG_ON(*method);
1567 /* Allocate method table for this OUI */
1568 if ((ret = allocate_method_table(method)))
1569 goto error3;
1570 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1571 mad_reg_req->oui, 3);
1572 goto check_in_use;
1573 }
1574 }
7ef5d4b0 1575 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1da177e4
LT
1576 goto error3;
1577
1578check_in_use:
1579 /* Now, make sure methods are not already in use */
1580 if (method_in_use(method, mad_reg_req))
1581 goto error4;
1582
1583 /* Finally, add in methods being registered */
19b629f5 1584 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1da177e4 1585 (*method)->agent[i] = agent_priv;
19b629f5 1586
1da177e4
LT
1587 return 0;
1588
1589error4:
1590 /* Remove any methods for this mad agent */
1591 remove_methods_mad_agent(*method, agent_priv);
1592 /* Now, check to see if there are any methods in use */
1593 if (!check_method_table(*method)) {
1594 /* If not, release management method table */
1595 kfree(*method);
1596 *method = NULL;
1597 }
1598 ret = -EINVAL;
1599error3:
1600 if (vendor_class) {
1601 (*vendor_table)->vendor_class[vclass] = NULL;
1602 kfree(vendor_class);
1603 }
1604error2:
1605 if (vendor) {
1606 *vendor_table = NULL;
1607 kfree(vendor);
1608 }
1609error1:
1610 return ret;
1611}
1612
1613static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1614{
1615 struct ib_mad_port_private *port_priv;
1616 struct ib_mad_mgmt_class_table *class;
1617 struct ib_mad_mgmt_method_table *method;
1618 struct ib_mad_mgmt_vendor_class_table *vendor;
1619 struct ib_mad_mgmt_vendor_class *vendor_class;
1620 int index;
1621 u8 mgmt_class;
1622
1623 /*
1624 * Was MAD registration request supplied
1625 * with original registration ?
1626 */
1627 if (!agent_priv->reg_req) {
1628 goto out;
1629 }
1630
1631 port_priv = agent_priv->qp_info->port_priv;
1632 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1633 class = port_priv->version[
1634 agent_priv->reg_req->mgmt_class_version].class;
1635 if (!class)
1636 goto vendor_check;
1637
1638 method = class->method_table[mgmt_class];
1639 if (method) {
1640 /* Remove any methods for this mad agent */
1641 remove_methods_mad_agent(method, agent_priv);
1642 /* Now, check to see if there are any methods still in use */
1643 if (!check_method_table(method)) {
1644 /* If not, release management method table */
1645 kfree(method);
1646 class->method_table[mgmt_class] = NULL;
1647 /* Any management classes left ? */
1648 if (!check_class_table(class)) {
1649 /* If not, release management class table */
1650 kfree(class);
1651 port_priv->version[
1652 agent_priv->reg_req->
1653 mgmt_class_version].class = NULL;
1654 }
1655 }
1656 }
1657
1658vendor_check:
1659 if (!is_vendor_class(mgmt_class))
1660 goto out;
1661
1662 /* normalize mgmt_class to vendor range 2 */
1663 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1664 vendor = port_priv->version[
1665 agent_priv->reg_req->mgmt_class_version].vendor;
1666
1667 if (!vendor)
1668 goto out;
1669
1670 vendor_class = vendor->vendor_class[mgmt_class];
1671 if (vendor_class) {
1672 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1673 if (index < 0)
1674 goto out;
1675 method = vendor_class->method_table[index];
1676 if (method) {
1677 /* Remove any methods for this mad agent */
1678 remove_methods_mad_agent(method, agent_priv);
1679 /*
1680 * Now, check to see if there are
1681 * any methods still in use
1682 */
1683 if (!check_method_table(method)) {
1684 /* If not, release management method table */
1685 kfree(method);
1686 vendor_class->method_table[index] = NULL;
1687 memset(vendor_class->oui[index], 0, 3);
1688 /* Any OUIs left? */
1689 if (!check_vendor_class(vendor_class)) {
1690 /* If not, release vendor class table */
1691 kfree(vendor_class);
1692 vendor->vendor_class[mgmt_class] = NULL;
1693 /* Any other vendor classes left? */
1694 if (!check_vendor_table(vendor)) {
1695 kfree(vendor);
1696 port_priv->version[
1697 agent_priv->reg_req->
1698 mgmt_class_version].
1699 vendor = NULL;
1700 }
1701 }
1702 }
1703 }
1704 }
1705
1706out:
1707 return;
1708}
1709
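/*
 * Editor's note (sketch, not part of the original source): the
 * registration tables torn down above form a per-port, three-level
 * lookup keyed by class version, management class, and method:
 *
 *	port_priv->version[class_version].class
 *		->method_table[mgmt_class]->agent[method]
 *	port_priv->version[class_version].vendor
 *		->vendor_class[vclass]->method_table[oui_index]->agent[method]
 *
 * Each level is freed only once the matching check_*_table() helper
 * reports it empty.
 */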
1710static struct ib_mad_agent_private *
1711find_mad_agent(struct ib_mad_port_private *port_priv,
d94bd266 1712 const struct ib_mad_hdr *mad_hdr)
1713{
1714 struct ib_mad_agent_private *mad_agent = NULL;
1715 unsigned long flags;
1716
1717 spin_lock_irqsave(&port_priv->reg_lock, flags);
d94bd266 1718 if (ib_response_mad(mad_hdr)) {
1719 u32 hi_tid;
1720 struct ib_mad_agent_private *entry;
1721
1722 /*
1723 * Routing is based on the high 32 bits of
1724 * the MAD's transaction ID.
1725 */
d94bd266 1726 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
34816ad9 1727 list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
1728 if (entry->agent.hi_tid == hi_tid) {
1729 mad_agent = entry;
1730 break;
1731 }
1732 }
1733 } else {
1734 struct ib_mad_mgmt_class_table *class;
1735 struct ib_mad_mgmt_method_table *method;
1736 struct ib_mad_mgmt_vendor_class_table *vendor;
1737 struct ib_mad_mgmt_vendor_class *vendor_class;
d94bd266 1738 const struct ib_vendor_mad *vendor_mad;
1739 int index;
1740
1741 /*
1742 * Routing is based on version, class, and method.
1743 * For "newer" vendor MADs, it is also based on the OUI.
1744 */
d94bd266 1745 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1da177e4 1746 goto out;
d94bd266 1747 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1da177e4 1748 class = port_priv->version[
d94bd266 1749 mad_hdr->class_version].class;
1750 if (!class)
1751 goto out;
d94bd266 1752 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1753 IB_MGMT_MAX_METHODS)
1754 goto out;
1da177e4 1755 method = class->method_table[convert_mgmt_class(
d94bd266 1756 mad_hdr->mgmt_class)];
1da177e4 1757 if (method)
d94bd266 1758 mad_agent = method->agent[mad_hdr->method &
1759 ~IB_MGMT_METHOD_RESP];
1760 } else {
1761 vendor = port_priv->version[
d94bd266 1762 mad_hdr->class_version].vendor;
1763 if (!vendor)
1764 goto out;
1765 vendor_class = vendor->vendor_class[vendor_class_index(
d94bd266 1766 mad_hdr->mgmt_class)];
1767 if (!vendor_class)
1768 goto out;
1769 /* Find matching OUI */
d94bd266 1770 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1771 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1772 if (index == -1)
1773 goto out;
1774 method = vendor_class->method_table[index];
1775 if (method) {
d94bd266 1776 mad_agent = method->agent[mad_hdr->method &
1777 ~IB_MGMT_METHOD_RESP];
1778 }
1779 }
1780 }
1781
1782 if (mad_agent) {
1783 if (mad_agent->agent.recv_handler)
1784 atomic_inc(&mad_agent->refcount);
1785 else {
1786 dev_notice(&port_priv->device->dev,
1787 "No receive handler for client %p on port %d\n",
1788 &mad_agent->agent, port_priv->port_num);
1789 mad_agent = NULL;
1790 }
1791 }
1792out:
1793 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1794
1795 return mad_agent;
1796}
1797
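/*
 * Editor's sketch (illustrative, not part of the original file):
 * response routing above relies on the convention that the high 32 bits
 * of a transaction ID identify the sending agent. Assuming hi_tid is
 * assigned when the agent registers, the extraction is simply:
 */
#if 0	/* hypothetical helper, shown for illustration */
static u32 tid_to_agent_hi_tid(__be64 tid)
{
	/* high 32 bits: agent id; low 32 bits: per-request counter */
	return (u32)(be64_to_cpu(tid) >> 32);
}
#endif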
1798static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1799 const struct ib_mad_qp_info *qp_info,
1800 bool opa)
1801{
1802 int valid = 0;
8e4349d1 1803 u32 qp_num = qp_info->qp->qp_num;
1804
1805 /* Make sure MAD base version is understood */
1806 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1807 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1808 pr_err("MAD received with unsupported base version %d %s\n",
1809 mad_hdr->base_version, opa ? "(opa)" : "");
1810 goto out;
1811 }
1812
1813 /* Filter SMI packets sent to other than QP0 */
1814 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1815 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1816 if (qp_num == 0)
1817 valid = 1;
1818 } else {
1819 /* CM attributes other than ClassPortInfo only use Send method */
1820 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1821 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1822 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1823 goto out;
1824 /* Filter GSI packets sent to QP0 */
1825 if (qp_num != 0)
1826 valid = 1;
1827 }
1828
1829out:
1830 return valid;
1831}
1832
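/*
 * Editor's note (sketch): the checks above reduce to a small table:
 *
 *	SMP classes (LID routed, directed route) -> valid only on QP0
 *	every other management class             -> valid only on QP1
 *	CM class                                 -> Send method only,
 *	                                            except ClassPortInfo
 */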
1833static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1834 const struct ib_mad_hdr *mad_hdr)
1835{
1836 struct ib_rmpp_mad *rmpp_mad;
1837
1838 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1839 return !mad_agent_priv->agent.rmpp_version ||
1471cb6c 1840 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1841 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1842 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1843 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1844}
1845
1846static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1847 const struct ib_mad_recv_wc *rwc)
fa9656bb 1848{
8bf4b30c 1849 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1850 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1851}
1852
1853static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1854 const struct ib_mad_send_wr_private *wr,
1855 const struct ib_mad_recv_wc *rwc)
1856{
1857 struct ib_ah_attr attr;
1858 u8 send_resp, rcv_resp;
1859 union ib_gid sgid;
1860 struct ib_device *device = mad_agent_priv->agent.device;
1861 u8 port_num = mad_agent_priv->agent.port_num;
1862 u8 lmc;
fa9656bb 1863
1864 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1865 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
fa9656bb 1866
1867 if (send_resp == rcv_resp)
1868 /* both requests or both responses: treat as not matching */
1869 return 0;
1870
1871 if (ib_query_ah(wr->send_buf.ah, &attr))
1872 /* Assume not equal, to avoid false positives. */
1873 return 0;
1874
1875 if (!!(attr.ah_flags & IB_AH_GRH) !=
1876 !!(rwc->wc->wc_flags & IB_WC_GRH))
1877 /* one has GID, other does not. Assume different */
1878 return 0;
1879
1880 if (!send_resp && rcv_resp) {
1881 /* we sent a request and received its response */
1882 if (!(attr.ah_flags & IB_AH_GRH)) {
1883 if (ib_get_cached_lmc(device, port_num, &lmc))
1884 return 0;
1885 return (!lmc || !((attr.src_path_bits ^
1886 rwc->wc->dlid_path_bits) &
1887 ((1 << lmc) - 1)));
1888 } else {
1889 if (ib_get_cached_gid(device, port_num,
55ee3ab2 1890 attr.grh.sgid_index, &sgid, NULL))
1891 return 0;
1892 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1893 16);
1894 }
1895 }
1896
1897 if (!(attr.ah_flags & IB_AH_GRH))
1898 return attr.dlid == rwc->wc->slid;
1899 else
1900 return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
1901 16);
1902}
1903
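/*
 * Editor's sketch (illustrative only): with LMC > 0 a port answers to
 * 2^lmc consecutive LIDs, so the request/response test above compares
 * only the low lmc path bits. For example, lmc == 2 masks with 0x3:
 */
#if 0	/* hypothetical helper, mirroring the check above */
static int same_port_path_bits(u8 src_path_bits, u8 dlid_path_bits, u8 lmc)
{
	return !lmc ||
	       !((src_path_bits ^ dlid_path_bits) & ((1 << lmc) - 1));
}
#endif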
1904static inline int is_direct(u8 class)
1905{
1906 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
fa9656bb 1907}
9874e746 1908
fa619a77 1909struct ib_mad_send_wr_private*
1910ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1911 const struct ib_mad_recv_wc *wc)
1da177e4 1912{
9874e746 1913 struct ib_mad_send_wr_private *wr;
83a1d228 1914 const struct ib_mad_hdr *mad_hdr;
fa9656bb 1915
83a1d228 1916 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1917
1918 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
83a1d228 1919 if ((wr->tid == mad_hdr->tid) &&
1920 rcv_has_same_class(wr, wc) &&
1921 /*
1922 * Don't check GID for direct routed MADs.
1923 * These might have permissive LIDs.
1924 */
83a1d228 1925 (is_direct(mad_hdr->mgmt_class) ||
9874e746 1926 rcv_has_same_gid(mad_agent_priv, wr, wc)))
39798695 1927 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1928 }
1929
1930 /*
1931 * It's possible to receive the response before we've
1932 * been notified that the send has completed
1933 */
9874e746 1934 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
c597eee5 1935 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
83a1d228 1936 wr->tid == mad_hdr->tid &&
1937 wr->timeout &&
1938 rcv_has_same_class(wr, wc) &&
1939 /*
1940 * Don't check GID for direct routed MADs.
1941 * These might have permissive LIDs.
1942 */
83a1d228 1943 (is_direct(mad_hdr->mgmt_class) ||
9874e746 1944 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1da177e4 1945 /* Verify request has not been canceled */
9874e746 1946 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1947 }
1948 return NULL;
1949}
1950
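/*
 * Editor's note (sketch): a received response matches a sent request
 * only when the TID and management class agree and, for LID-routed
 * MADs, the GID/LID comparison holds; directed-route MADs skip the GID
 * check because they may carry permissive LIDs. A canceled request
 * (wr->status != IB_WC_SUCCESS) never matches.
 */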
fa619a77 1951void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1952{
1953 mad_send_wr->timeout = 0;
1954 if (mad_send_wr->refcount == 1)
1955 list_move_tail(&mad_send_wr->agent_list,
6a0c435e 1956 &mad_send_wr->mad_agent_priv->done_list);
1957}
1958
1da177e4 1959static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
4a0754fa 1960 struct ib_mad_recv_wc *mad_recv_wc)
1961{
1962 struct ib_mad_send_wr_private *mad_send_wr;
1963 struct ib_mad_send_wc mad_send_wc;
1964 unsigned long flags;
1965
1966 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1967 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
1471cb6c 1968 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1969 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
1970 mad_recv_wc);
1971 if (!mad_recv_wc) {
1b52fa98 1972 deref_mad_agent(mad_agent_priv);
1973 return;
1974 }
1975 }
1976
1da177e4 1977 /* Complete corresponding request */
96909308 1978 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
1da177e4 1979 spin_lock_irqsave(&mad_agent_priv->lock, flags);
fa9656bb 1980 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1981 if (!mad_send_wr) {
1982 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1983 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
1984 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
1985 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
1986 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
1987 /* user rmpp is in effect
1988 * and this is an active RMPP MAD
1989 */
1990 mad_agent_priv->agent.recv_handler(
1991 &mad_agent_priv->agent, NULL,
1992 mad_recv_wc);
1993 atomic_dec(&mad_agent_priv->refcount);
1994 } else {
1995 /* not user RMPP; revert to normal behavior and
1996 * drop the MAD */
1997 ib_free_recv_mad(mad_recv_wc);
1998 deref_mad_agent(mad_agent_priv);
1999 return;
2000 }
2001 } else {
2002 ib_mark_mad_done(mad_send_wr);
2003 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1da177e4 2004
1471cb6c 2005 /* Defined behavior is to complete response before request */
2006 mad_agent_priv->agent.recv_handler(
2007 &mad_agent_priv->agent,
2008 &mad_send_wr->send_buf,
2009 mad_recv_wc);
1471cb6c 2010 atomic_dec(&mad_agent_priv->refcount);
1da177e4 2011
2012 mad_send_wc.status = IB_WC_SUCCESS;
2013 mad_send_wc.vendor_err = 0;
2014 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2015 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2016 }
1da177e4 2017 } else {
ca281265 2018 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
4a0754fa 2019 mad_recv_wc);
1b52fa98 2020 deref_mad_agent(mad_agent_priv);
2021 }
2022}
2023
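/*
 * Editor's sketch (hypothetical client code, not part of this file):
 * a kernel client on the receiving end of the path above supplies a
 * recv_handler when it registers; send_buf is non-NULL only when the
 * receive matched one of the client's own outstanding requests:
 */
#if 0	/* illustrative only */
static void example_recv_handler(struct ib_mad_agent *agent,
				 struct ib_mad_send_buf *send_buf,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	if (send_buf)
		pr_info("response to one of our requests\n");
	ib_free_recv_mad(mad_recv_wc);	/* client owns the buffer now */
}
#endif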
2024static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2025 const struct ib_mad_qp_info *qp_info,
2026 const struct ib_wc *wc,
2027 int port_num,
2028 struct ib_mad_private *recv,
2029 struct ib_mad_private *response)
2030{
2031 enum smi_forward_action retsmi;
c9082e51 2032 struct ib_smp *smp = (struct ib_smp *)recv->mad;
e11ae8aa 2033
c9082e51 2034 if (smi_handle_dr_smp_recv(smp,
4139032b 2035 rdma_cap_ib_switch(port_priv->device),
2036 port_num,
2037 port_priv->device->phys_port_cnt) ==
2038 IB_SMI_DISCARD)
2039 return IB_SMI_DISCARD;
2040
c9082e51 2041 retsmi = smi_check_forward_dr_smp(smp);
2042 if (retsmi == IB_SMI_LOCAL)
2043 return IB_SMI_HANDLE;
2044
2045 if (retsmi == IB_SMI_SEND) { /* don't forward */
c9082e51 2046 if (smi_handle_dr_smp_send(smp,
4139032b 2047 rdma_cap_ib_switch(port_priv->device),
2048 port_num) == IB_SMI_DISCARD)
2049 return IB_SMI_DISCARD;
2050
c9082e51 2051 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
e11ae8aa 2052 return IB_SMI_DISCARD;
4139032b 2053 } else if (rdma_cap_ib_switch(port_priv->device)) {
e11ae8aa 2054 /* forward case for switches */
c9082e51 2055 memcpy(response, recv, mad_priv_size(response));
e11ae8aa 2056 response->header.recv_wc.wc = &response->header.wc;
c9082e51 2057 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2058 response->header.recv_wc.recv_buf.grh = &response->grh;
2059
c9082e51 2060 agent_send_response((const struct ib_mad_hdr *)response->mad,
2061 &response->grh, wc,
2062 port_priv->device,
2063 smi_get_fwd_port(smp),
2064 qp_info->qp->qp_num,
2065 response->mad_size,
2066 false);
2067
2068 return IB_SMI_DISCARD;
2069 }
2070 return IB_SMI_HANDLE;
2071}
2072
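/*
 * Editor's note (sketch): possible outcomes of the directed-route
 * handling above:
 *
 *	IB_SMI_LOCAL        -> deliver to the local SM agent (HANDLE)
 *	IB_SMI_SEND         -> run the local send checks, then handle
 *	                       or discard
 *	forward on a switch -> resend out the forwarding port via
 *	                       agent_send_response(), then discard here
 */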
c9082e51 2073static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2074 struct ib_mad_private *response,
2075 size_t *resp_len, bool opa)
0b307043 2076{
2077 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2078 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2079
2080 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2081 recv_hdr->method == IB_MGMT_METHOD_SET) {
2082 memcpy(response, recv, mad_priv_size(response));
0b307043 2083 response->header.recv_wc.wc = &response->header.wc;
c9082e51 2084 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
0b307043 2085 response->header.recv_wc.recv_buf.grh = &response->grh;
2086 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2087 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2088 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2089 resp_hdr->status |= IB_SMP_DIRECTION;
0b307043 2090
2091 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2092 if (recv_hdr->mgmt_class ==
2093 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2094 recv_hdr->mgmt_class ==
2095 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2096 *resp_len = opa_get_smp_header_size(
2097 (struct opa_smp *)recv->mad);
2098 else
2099 *resp_len = sizeof(struct ib_mad_hdr);
2100 }
2101
2102 return true;
2103 } else {
2104 return false;
2105 }
2106}
2107
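/*
 * Editor's note (sketch): the generated reply above flips the method
 * to GET_RESP, sets the "unsupported method/attribute" status, and for
 * directed-route SMPs also sets the direction bit so the reply
 * retraces the recorded return path.
 */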
2108static enum smi_action
2109handle_opa_smi(struct ib_mad_port_private *port_priv,
2110 struct ib_mad_qp_info *qp_info,
2111 struct ib_wc *wc,
2112 int port_num,
2113 struct ib_mad_private *recv,
2114 struct ib_mad_private *response)
2115{
2116 enum smi_forward_action retsmi;
2117 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2118
2119 if (opa_smi_handle_dr_smp_recv(smp,
4139032b 2120 rdma_cap_ib_switch(port_priv->device),
2121 port_num,
2122 port_priv->device->phys_port_cnt) ==
2123 IB_SMI_DISCARD)
2124 return IB_SMI_DISCARD;
2125
2126 retsmi = opa_smi_check_forward_dr_smp(smp);
2127 if (retsmi == IB_SMI_LOCAL)
2128 return IB_SMI_HANDLE;
2129
2130 if (retsmi == IB_SMI_SEND) { /* don't forward */
2131 if (opa_smi_handle_dr_smp_send(smp,
4139032b 2132 rdma_cap_ib_switch(port_priv->device),
2133 port_num) == IB_SMI_DISCARD)
2134 return IB_SMI_DISCARD;
2135
2136 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2137 IB_SMI_DISCARD)
2138 return IB_SMI_DISCARD;
2139
4139032b 2140 } else if (rdma_cap_ib_switch(port_priv->device)) {
2141 /* forward case for switches */
2142 memcpy(response, recv, mad_priv_size(response));
2143 response->header.recv_wc.wc = &response->header.wc;
2144 response->header.recv_wc.recv_buf.opa_mad =
2145 (struct opa_mad *)response->mad;
2146 response->header.recv_wc.recv_buf.grh = &response->grh;
2147
2148 agent_send_response((const struct ib_mad_hdr *)response->mad,
2149 &response->grh, wc,
2150 port_priv->device,
2151 opa_smi_get_fwd_port(smp),
2152 qp_info->qp->qp_num,
2153 recv->header.wc.byte_len,
2154 true);
2155
2156 return IB_SMI_DISCARD;
2157 }
2158
2159 return IB_SMI_HANDLE;
2160}
2161
2162static enum smi_action
2163handle_smi(struct ib_mad_port_private *port_priv,
2164 struct ib_mad_qp_info *qp_info,
2165 struct ib_wc *wc,
2166 int port_num,
2167 struct ib_mad_private *recv,
2168 struct ib_mad_private *response,
2169 bool opa)
2170{
2171 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2172
2173 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2174 mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
2175 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2176 response);
2177
2178 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2179}
2180
d53e11fd 2181static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
1da177e4 2182{
2183 struct ib_mad_port_private *port_priv = cq->cq_context;
2184 struct ib_mad_list_head *mad_list =
2185 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2186 struct ib_mad_qp_info *qp_info;
2187 struct ib_mad_private_header *mad_priv_hdr;
445d6807 2188 struct ib_mad_private *recv, *response = NULL;
1da177e4 2189 struct ib_mad_agent_private *mad_agent;
1bae4dbf 2190 int port_num;
a9e74323 2191 int ret = IB_MAD_RESULT_SUCCESS;
2192 size_t mad_size;
2193 u16 resp_mad_pkey_index = 0;
8e4349d1 2194 bool opa;
1da177e4 2195
2196 if (list_empty_careful(&port_priv->port_list))
2197 return;
2198
2199 if (wc->status != IB_WC_SUCCESS) {
2200 /*
2201 * Receive errors indicate that the QP has entered the error
2202 * state - error handling/shutdown code will cleanup
2203 */
2204 return;
2205 }
2206
2207 qp_info = mad_list->mad_queue->qp_info;
2208 dequeue_mad(mad_list);
2209
2210 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2211 qp_info->port_priv->port_num);
2212
2213 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2214 mad_list);
2215 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2216 ib_dma_unmap_single(port_priv->device,
2217 recv->header.mapping,
c9082e51 2218 mad_priv_dma_size(recv),
1527106f 2219 DMA_FROM_DEVICE);
2220
2221 /* Set up the MAD receive work completion from the "normal" work completion */
2222 recv->header.wc = *wc;
2223 recv->header.recv_wc.wc = &recv->header.wc;
2224
2225 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2226 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2227 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2228 } else {
2229 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2230 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2231 }
2232
c9082e51 2233 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2234 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2235
2236 if (atomic_read(&qp_info->snoop_count))
2237 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2238
2239 /* Validate MAD */
8e4349d1 2240 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2241 goto out;
2242
2243 mad_size = recv->mad_size;
2244 response = alloc_mad_private(mad_size, GFP_KERNEL);
445d6807 2245 if (!response) {
7ef5d4b0 2246 dev_err(&port_priv->device->dev,
d53e11fd 2247 "%s: no memory for response buffer\n", __func__);
2248 goto out;
2249 }
2250
4139032b 2251 if (rdma_cap_ib_switch(port_priv->device))
2252 port_num = wc->port_num;
2253 else
2254 port_num = port_priv->port_num;
2255
c9082e51 2256 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
1da177e4 2257 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2258 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2259 response, opa)
e11ae8aa 2260 == IB_SMI_DISCARD)
1da177e4 2261 goto out;
2262 }
2263
2264 /* Give driver "right of first refusal" on incoming MAD */
2265 if (port_priv->device->process_mad) {
2266 ret = port_priv->device->process_mad(port_priv->device, 0,
2267 port_priv->port_num,
2268 wc, &recv->grh,
2269 (const struct ib_mad_hdr *)recv->mad,
2270 recv->mad_size,
2271 (struct ib_mad_hdr *)response->mad,
2272 &mad_size, &resp_mad_pkey_index);
2273
2274 if (opa)
2275 wc->pkey_index = resp_mad_pkey_index;
2276
2277 if (ret & IB_MAD_RESULT_SUCCESS) {
2278 if (ret & IB_MAD_RESULT_CONSUMED)
2279 goto out;
2280 if (ret & IB_MAD_RESULT_REPLY) {
c9082e51 2281 agent_send_response((const struct ib_mad_hdr *)response->mad,
2282 &recv->grh, wc,
2283 port_priv->device,
1bae4dbf 2284 port_num,
c9082e51 2285 qp_info->qp->qp_num,
8e4349d1 2286 mad_size, opa);
2287 goto out;
2288 }
2289 }
2290 }
2291
c9082e51 2292 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
1da177e4 2293 if (mad_agent) {
4a0754fa 2294 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2295 /*
2296 * recv is freed up in error cases in ib_mad_complete_recv
2297 * or via recv_handler in ib_mad_complete_recv()
2298 */
2299 recv = NULL;
a9e74323 2300 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
8e4349d1 2301 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2302 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2303 port_priv->device, port_num,
8e4349d1 2304 qp_info->qp->qp_num, mad_size, opa);
2305 }
2306
2307out:
2308 /* Post another receive request for this QP */
2309 if (response) {
2310 ib_mad_post_receive_mads(qp_info, response);
c9082e51 2311 kfree(recv);
2312 } else
2313 ib_mad_post_receive_mads(qp_info, recv);
2314}
2315
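/*
 * Editor's note (sketch): receive processing above runs in this order:
 * DMA unmap -> validate_mad() -> directed-route SMP handling -> the
 * driver's process_mad() ("right of first refusal") -> agent dispatch
 * via find_mad_agent()/ib_mad_complete_recv(), falling back to
 * generate_unmatched_resp(); a receive WR is always reposted to
 * replace the consumed buffer.
 */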
2316static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2317{
2318 struct ib_mad_send_wr_private *mad_send_wr;
2319 unsigned long delay;
2320
2321 if (list_empty(&mad_agent_priv->wait_list)) {
136b5721 2322 cancel_delayed_work(&mad_agent_priv->timed_work);
2323 } else {
2324 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2325 struct ib_mad_send_wr_private,
2326 agent_list);
2327
2328 if (time_after(mad_agent_priv->timeout,
2329 mad_send_wr->timeout)) {
2330 mad_agent_priv->timeout = mad_send_wr->timeout;
2331 delay = mad_send_wr->timeout - jiffies;
2332 if ((long)delay <= 0)
2333 delay = 1;
2334 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2335 &mad_agent_priv->timed_work, delay);
2336 }
2337 }
2338}
2339
d760ce8f 2340static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
1da177e4 2341{
d760ce8f 2342 struct ib_mad_agent_private *mad_agent_priv;
2343 struct ib_mad_send_wr_private *temp_mad_send_wr;
2344 struct list_head *list_item;
2345 unsigned long delay;
2346
d760ce8f 2347 mad_agent_priv = mad_send_wr->mad_agent_priv;
2348 list_del(&mad_send_wr->agent_list);
2349
2350 delay = mad_send_wr->timeout;
2351 mad_send_wr->timeout += jiffies;
2352
2353 if (delay) {
2354 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2355 temp_mad_send_wr = list_entry(list_item,
2356 struct ib_mad_send_wr_private,
2357 agent_list);
2358 if (time_after(mad_send_wr->timeout,
2359 temp_mad_send_wr->timeout))
2360 break;
2361 }
1da177e4 2362 }
2363 else
2364 list_item = &mad_agent_priv->wait_list;
2365 list_add(&mad_send_wr->agent_list, list_item);
2366
2367 /* Reschedule a work item if we have a shorter timeout */
2368 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2369 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2370 &mad_agent_priv->timed_work, delay);
2371}
2372
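/*
 * Editor's note (sketch): the wait list is kept sorted by absolute
 * timeout, earliest entry first, by scanning backwards from the tail,
 * so the delayed work only ever needs to track the head entry; a zero
 * timeout places the entry at the head for immediate expiry.
 */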
2373void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2374 int timeout_ms)
2375{
2376 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2377 wait_for_response(mad_send_wr);
2378}
2379
2380/*
2381 * Process a send work completion
2382 */
2383void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2384 struct ib_mad_send_wc *mad_send_wc)
2385{
2386 struct ib_mad_agent_private *mad_agent_priv;
2387 unsigned long flags;
fa619a77 2388 int ret;
1da177e4 2389
d760ce8f 2390 mad_agent_priv = mad_send_wr->mad_agent_priv;
1da177e4 2391 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1471cb6c 2392 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2393 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2394 if (ret == IB_RMPP_RESULT_CONSUMED)
2395 goto done;
2396 } else
2397 ret = IB_RMPP_RESULT_UNHANDLED;
2398
2399 if (mad_send_wc->status != IB_WC_SUCCESS &&
2400 mad_send_wr->status == IB_WC_SUCCESS) {
2401 mad_send_wr->status = mad_send_wc->status;
2402 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2403 }
2404
2405 if (--mad_send_wr->refcount > 0) {
2406 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2407 mad_send_wr->status == IB_WC_SUCCESS) {
d760ce8f 2408 wait_for_response(mad_send_wr);
1da177e4 2409 }
fa619a77 2410 goto done;
2411 }
2412
2413 /* Remove send from MAD agent and notify client of completion */
2414 list_del(&mad_send_wr->agent_list);
2415 adjust_timeout(mad_agent_priv);
2416 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2417
2418 if (mad_send_wr->status != IB_WC_SUCCESS)
2419 mad_send_wc->status = mad_send_wr->status;
2420 if (ret == IB_RMPP_RESULT_INTERNAL)
2421 ib_rmpp_send_handler(mad_send_wc);
2422 else
2423 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2424 mad_send_wc);
2425
2426 /* Release reference on agent taken when sending */
1b52fa98 2427 deref_mad_agent(mad_agent_priv);
2428 return;
2429done:
2430 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2431}
2432
d53e11fd 2433static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
1da177e4 2434{
2435 struct ib_mad_port_private *port_priv = cq->cq_context;
2436 struct ib_mad_list_head *mad_list =
2437 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
1da177e4 2438 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2439 struct ib_mad_qp_info *qp_info;
2440 struct ib_mad_queue *send_queue;
2441 struct ib_send_wr *bad_send_wr;
34816ad9 2442 struct ib_mad_send_wc mad_send_wc;
2443 unsigned long flags;
2444 int ret;
2445
2446 if (list_empty_careful(&port_priv->port_list))
2447 return;
2448
2449 if (wc->status != IB_WC_SUCCESS) {
2450 if (!ib_mad_send_error(port_priv, wc))
2451 return;
2452 }
2453
2454 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2455 mad_list);
2456 send_queue = mad_list->mad_queue;
2457 qp_info = send_queue->qp_info;
2458
2459retry:
2460 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2461 mad_send_wr->header_mapping,
2462 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2463 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2464 mad_send_wr->payload_mapping,
2465 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2466 queued_send_wr = NULL;
2467 spin_lock_irqsave(&send_queue->lock, flags);
2468 list_del(&mad_list->list);
2469
2470 /* Move queued send to the send queue */
2471 if (send_queue->count-- > send_queue->max_active) {
2472 mad_list = container_of(qp_info->overflow_list.next,
2473 struct ib_mad_list_head, list);
2474 queued_send_wr = container_of(mad_list,
2475 struct ib_mad_send_wr_private,
2476 mad_list);
179e0917 2477 list_move_tail(&mad_list->list, &send_queue->list);
2478 }
2479 spin_unlock_irqrestore(&send_queue->lock, flags);
2480
2481 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2482 mad_send_wc.status = wc->status;
2483 mad_send_wc.vendor_err = wc->vendor_err;
1da177e4 2484 if (atomic_read(&qp_info->snoop_count))
34816ad9 2485 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
1da177e4 2486 IB_MAD_SNOOP_SEND_COMPLETIONS);
34816ad9 2487 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2488
2489 if (queued_send_wr) {
e622f2f4 2490 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
34816ad9 2491 &bad_send_wr);
1da177e4 2492 if (ret) {
2493 dev_err(&port_priv->device->dev,
2494 "ib_post_send failed: %d\n", ret);
2495 mad_send_wr = queued_send_wr;
2496 wc->status = IB_WC_LOC_QP_OP_ERR;
2497 goto retry;
2498 }
2499 }
2500}
2501
2502static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2503{
2504 struct ib_mad_send_wr_private *mad_send_wr;
2505 struct ib_mad_list_head *mad_list;
2506 unsigned long flags;
2507
2508 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2509 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2510 mad_send_wr = container_of(mad_list,
2511 struct ib_mad_send_wr_private,
2512 mad_list);
2513 mad_send_wr->retry = 1;
2514 }
2515 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2516}
2517
2518static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2519 struct ib_wc *wc)
1da177e4 2520{
2521 struct ib_mad_list_head *mad_list =
2522 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2523 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2524 struct ib_mad_send_wr_private *mad_send_wr;
2525 int ret;
2526
2527 /*
2528 * Send errors will transition the QP to SQE - move
2529 * QP to RTS and repost flushed work requests
2530 */
2531 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2532 mad_list);
2533 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2534 if (mad_send_wr->retry) {
2535 /* Repost send */
2536 struct ib_send_wr *bad_send_wr;
2537
2538 mad_send_wr->retry = 0;
e622f2f4 2539 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
1da177e4 2540 &bad_send_wr);
2541 if (!ret)
2542 return false;
2543 }
2544 } else {
2545 struct ib_qp_attr *attr;
2546
2547 /* Transition QP to RTS and fail offending send */
2548 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2549 if (attr) {
2550 attr->qp_state = IB_QPS_RTS;
2551 attr->cur_qp_state = IB_QPS_SQE;
2552 ret = ib_modify_qp(qp_info->qp, attr,
2553 IB_QP_STATE | IB_QP_CUR_STATE);
2554 kfree(attr);
2555 if (ret)
7ef5d4b0 2556 dev_err(&port_priv->device->dev,
2557 "%s - ib_modify_qp to RTS: %d\n",
2558 __func__, ret);
2559 else
2560 mark_sends_for_retry(qp_info);
2561 }
1da177e4 2562 }
1da177e4 2563
d53e11fd 2564 return true;
2565}
2566
2567static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2568{
2569 unsigned long flags;
2570 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2571 struct ib_mad_send_wc mad_send_wc;
2572 struct list_head cancel_list;
2573
2574 INIT_LIST_HEAD(&cancel_list);
2575
2576 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2577 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2578 &mad_agent_priv->send_list, agent_list) {
2579 if (mad_send_wr->status == IB_WC_SUCCESS) {
3cd96564 2580 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2581 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2582 }
2583 }
2584
2585 /* Empty wait list to prevent receives from finding a request */
2586 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2587 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2588
2589 /* Report all cancelled requests */
2590 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2591 mad_send_wc.vendor_err = 0;
2592
2593 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2594 &cancel_list, agent_list) {
2595 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2596 list_del(&mad_send_wr->agent_list);
2597 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2598 &mad_send_wc);
2599 atomic_dec(&mad_agent_priv->refcount);
2600 }
2601}
2602
2603static struct ib_mad_send_wr_private*
2604find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2605 struct ib_mad_send_buf *send_buf)
2606{
2607 struct ib_mad_send_wr_private *mad_send_wr;
2608
2609 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2610 agent_list) {
34816ad9 2611 if (&mad_send_wr->send_buf == send_buf)
2612 return mad_send_wr;
2613 }
2614
2615 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2616 agent_list) {
2617 if (is_rmpp_data_mad(mad_agent_priv,
2618 mad_send_wr->send_buf.mad) &&
34816ad9 2619 &mad_send_wr->send_buf == send_buf)
2620 return mad_send_wr;
2621 }
2622 return NULL;
2623}
2624
2625int ib_modify_mad(struct ib_mad_agent *mad_agent,
2626 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2627{
2628 struct ib_mad_agent_private *mad_agent_priv;
2629 struct ib_mad_send_wr_private *mad_send_wr;
2630 unsigned long flags;
cabe3cbc 2631 int active;
2632
2633 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2634 agent);
2635 spin_lock_irqsave(&mad_agent_priv->lock, flags);
34816ad9 2636 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
03b61ad2 2637 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
1da177e4 2638 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
03b61ad2 2639 return -EINVAL;
2640 }
2641
cabe3cbc 2642 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
03b61ad2 2643 if (!timeout_ms) {
1da177e4 2644 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
03b61ad2 2645 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2646 }
2647
34816ad9 2648 mad_send_wr->send_buf.timeout_ms = timeout_ms;
cabe3cbc 2649 if (active)
2650 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2651 else
2652 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2653
1da177e4 2654 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2655 return 0;
2656}
2657EXPORT_SYMBOL(ib_modify_mad);
1da177e4 2658
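/*
 * Editor's sketch (hypothetical usage; my_agent and my_send_buf are
 * assumed to come from ib_register_mad_agent() and
 * ib_create_send_mad()):
 */
#if 0	/* illustrative only */
static void example_adjust_or_cancel(struct ib_mad_agent *my_agent,
				     struct ib_mad_send_buf *my_send_buf)
{
	ib_modify_mad(my_agent, my_send_buf, 500);	/* wait another 500 ms */
	ib_modify_mad(my_agent, my_send_buf, 0);	/* cancel; see ib_cancel_mad() below */
}
#endif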
2659void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2660 struct ib_mad_send_buf *send_buf)
03b61ad2 2661{
34816ad9 2662 ib_modify_mad(mad_agent, send_buf, 0);
2663}
2664EXPORT_SYMBOL(ib_cancel_mad);
2665
c4028958 2666static void local_completions(struct work_struct *work)
2667{
2668 struct ib_mad_agent_private *mad_agent_priv;
2669 struct ib_mad_local_private *local;
2670 struct ib_mad_agent_private *recv_mad_agent;
2671 unsigned long flags;
1d9bc6d6 2672 int free_mad;
2673 struct ib_wc wc;
2674 struct ib_mad_send_wc mad_send_wc;
8e4349d1 2675 bool opa;
1da177e4 2676
2677 mad_agent_priv =
2678 container_of(work, struct ib_mad_agent_private, local_work);
1da177e4 2679
2680 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2681 mad_agent_priv->qp_info->port_priv->port_num);
2682
2683 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2684 while (!list_empty(&mad_agent_priv->local_list)) {
2685 local = list_entry(mad_agent_priv->local_list.next,
2686 struct ib_mad_local_private,
2687 completion_list);
37289efe 2688 list_del(&local->completion_list);
1da177e4 2689 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1d9bc6d6 2690 free_mad = 0;
1da177e4 2691 if (local->mad_priv) {
8e4349d1 2692 u8 base_version;
2693 recv_mad_agent = local->recv_mad_agent;
2694 if (!recv_mad_agent) {
2695 dev_err(&mad_agent_priv->agent.device->dev,
2696 "No receive MAD agent for local completion\n");
1d9bc6d6 2697 free_mad = 1;
2698 goto local_send_completion;
2699 }
2700
2701 /*
2702 * Defined behavior is to complete response
2703 * before request
2704 */
062dbb69 2705 build_smp_wc(recv_mad_agent->agent.qp,
d53e11fd 2706 local->mad_send_wr->send_wr.wr.wr_cqe,
97f52eb4 2707 be16_to_cpu(IB_LID_PERMISSIVE),
e622f2f4 2708 local->mad_send_wr->send_wr.pkey_index,
8e4349d1 2709 recv_mad_agent->agent.port_num, &wc);
2710
2711 local->mad_priv->header.recv_wc.wc = &wc;
2712
2713 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2714 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2715 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2716 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2717 } else {
2718 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2719 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2720 }
2721
2722 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2723 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2724 &local->mad_priv->header.recv_wc.rmpp_list);
2725 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2726 local->mad_priv->header.recv_wc.recv_buf.mad =
c9082e51 2727 (struct ib_mad *)local->mad_priv->mad;
2728 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2729 snoop_recv(recv_mad_agent->qp_info,
2730 &local->mad_priv->header.recv_wc,
2731 IB_MAD_SNOOP_RECVS);
2732 recv_mad_agent->agent.recv_handler(
2733 &recv_mad_agent->agent,
ca281265 2734 &local->mad_send_wr->send_buf,
2735 &local->mad_priv->header.recv_wc);
2736 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2737 atomic_dec(&recv_mad_agent->refcount);
2738 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2739 }
2740
2741local_send_completion:
2742 /* Complete send */
2743 mad_send_wc.status = IB_WC_SUCCESS;
2744 mad_send_wc.vendor_err = 0;
34816ad9 2745 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
1da177e4 2746 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2747 snoop_send(mad_agent_priv->qp_info,
2748 &local->mad_send_wr->send_buf,
2749 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2750 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2751 &mad_send_wc);
2752
2753 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1da177e4 2754 atomic_dec(&mad_agent_priv->refcount);
1d9bc6d6 2755 if (free_mad)
c9082e51 2756 kfree(local->mad_priv);
2757 kfree(local);
2758 }
2759 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2760}
2761
2762static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2763{
2764 int ret;
2765
4fc8cd49 2766 if (!mad_send_wr->retries_left)
2767 return -ETIMEDOUT;
2768
2769 mad_send_wr->retries_left--;
2770 mad_send_wr->send_buf.retries++;
2771
34816ad9 2772 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
f75b7a52 2773
1471cb6c 2774 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2775 ret = ib_retry_rmpp(mad_send_wr);
2776 switch (ret) {
2777 case IB_RMPP_RESULT_UNHANDLED:
2778 ret = ib_send_mad(mad_send_wr);
2779 break;
2780 case IB_RMPP_RESULT_CONSUMED:
2781 ret = 0;
2782 break;
2783 default:
2784 ret = -ECOMM;
2785 break;
2786 }
2787 } else
2788 ret = ib_send_mad(mad_send_wr);
2789
2790 if (!ret) {
2791 mad_send_wr->refcount++;
2792 list_add_tail(&mad_send_wr->agent_list,
2793 &mad_send_wr->mad_agent_priv->send_list);
2794 }
2795 return ret;
2796}
2797
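/*
 * Editor's note (sketch): retries_left counts down the attempts that
 * remain while send_buf.retries counts up the attempts made, so a
 * client can read send_buf.retries in its send handler to learn how
 * many resends a completed MAD needed.
 */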
c4028958 2798static void timeout_sends(struct work_struct *work)
2799{
2800 struct ib_mad_agent_private *mad_agent_priv;
2801 struct ib_mad_send_wr_private *mad_send_wr;
2802 struct ib_mad_send_wc mad_send_wc;
2803 unsigned long flags, delay;
2804
2805 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2806 timed_work.work);
2807 mad_send_wc.vendor_err = 0;
2808
2809 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2810 while (!list_empty(&mad_agent_priv->wait_list)) {
2811 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2812 struct ib_mad_send_wr_private,
2813 agent_list);
2814
2815 if (time_after(mad_send_wr->timeout, jiffies)) {
2816 delay = mad_send_wr->timeout - jiffies;
2817 if ((long)delay <= 0)
2818 delay = 1;
2819 queue_delayed_work(mad_agent_priv->qp_info->
2820 port_priv->wq,
2821 &mad_agent_priv->timed_work, delay);
2822 break;
2823 }
2824
dbf9227b 2825 list_del(&mad_send_wr->agent_list);
2826 if (mad_send_wr->status == IB_WC_SUCCESS &&
2827 !retry_send(mad_send_wr))
2828 continue;
2829
2830 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2831
2832 if (mad_send_wr->status == IB_WC_SUCCESS)
2833 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2834 else
2835 mad_send_wc.status = mad_send_wr->status;
34816ad9 2836 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2837 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2838 &mad_send_wc);
2839
2840 atomic_dec(&mad_agent_priv->refcount);
2841 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2842 }
2843 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2844}
2845
2846/*
2847 * Allocate receive MADs and post receive WRs for them
2848 */
2849static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2850 struct ib_mad_private *mad)
2851{
2852 unsigned long flags;
2853 int post, ret;
2854 struct ib_mad_private *mad_priv;
2855 struct ib_sge sg_list;
2856 struct ib_recv_wr recv_wr, *bad_recv_wr;
2857 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2858
2859 /* Initialize common scatter list fields */
4be90bc6 2860 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2861
2862 /* Initialize common receive WR fields */
2863 recv_wr.next = NULL;
2864 recv_wr.sg_list = &sg_list;
2865 recv_wr.num_sge = 1;
2866
2867 do {
2868 /* Allocate and map receive buffer */
2869 if (mad) {
2870 mad_priv = mad;
2871 mad = NULL;
2872 } else {
2873 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2874 GFP_ATOMIC);
1da177e4 2875 if (!mad_priv) {
2876 dev_err(&qp_info->port_priv->device->dev,
2877 "No memory for receive buffer\n");
2878 ret = -ENOMEM;
2879 break;
2880 }
2881 }
c9082e51 2882 sg_list.length = mad_priv_dma_size(mad_priv);
2883 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2884 &mad_priv->grh,
c9082e51 2885 mad_priv_dma_size(mad_priv),
1527106f 2886 DMA_FROM_DEVICE);
2887 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2888 sg_list.addr))) {
2889 ret = -ENOMEM;
2890 break;
2891 }
1527106f 2892 mad_priv->header.mapping = sg_list.addr;
1da177e4 2893 mad_priv->header.mad_list.mad_queue = recv_queue;
2894 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2895 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2896
2897 /* Post receive WR */
2898 spin_lock_irqsave(&recv_queue->lock, flags);
2899 post = (++recv_queue->count < recv_queue->max_active);
2900 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2901 spin_unlock_irqrestore(&recv_queue->lock, flags);
2902 ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
2903 if (ret) {
2904 spin_lock_irqsave(&recv_queue->lock, flags);
2905 list_del(&mad_priv->header.mad_list.list);
2906 recv_queue->count--;
2907 spin_unlock_irqrestore(&recv_queue->lock, flags);
2908 ib_dma_unmap_single(qp_info->port_priv->device,
2909 mad_priv->header.mapping,
c9082e51 2910 mad_priv_dma_size(mad_priv),
1527106f 2911 DMA_FROM_DEVICE);
c9082e51 2912 kfree(mad_priv);
2913 dev_err(&qp_info->port_priv->device->dev,
2914 "ib_post_recv failed: %d\n", ret);
2915 break;
2916 }
2917 } while (post);
2918
2919 return ret;
2920}
2921
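/*
 * Editor's note (sketch): the loop above keeps posting until the
 * receive queue is full again (count == max_active); passing a
 * recycled buffer in 'mad' lets the completion path hand its
 * just-consumed buffer straight back without a new allocation.
 */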
2922/*
2923 * Return all the posted receive MADs
2924 */
2925static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
2926{
2927 struct ib_mad_private_header *mad_priv_hdr;
2928 struct ib_mad_private *recv;
2929 struct ib_mad_list_head *mad_list;
2930
2931 if (!qp_info->qp)
2932 return;
2933
2934 while (!list_empty(&qp_info->recv_queue.list)) {
2935
2936 mad_list = list_entry(qp_info->recv_queue.list.next,
2937 struct ib_mad_list_head, list);
2938 mad_priv_hdr = container_of(mad_list,
2939 struct ib_mad_private_header,
2940 mad_list);
2941 recv = container_of(mad_priv_hdr, struct ib_mad_private,
2942 header);
2943
2944 /* Remove from posted receive MAD list */
2945 list_del(&mad_list->list);
2946
2947 ib_dma_unmap_single(qp_info->port_priv->device,
2948 recv->header.mapping,
c9082e51 2949 mad_priv_dma_size(recv),
1527106f 2950 DMA_FROM_DEVICE);
c9082e51 2951 kfree(recv);
2952 }
2953
2954 qp_info->recv_queue.count = 0;
2955}
2956
2957/*
2958 * Start the port
2959 */
2960static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
2961{
2962 int ret, i;
2963 struct ib_qp_attr *attr;
2964 struct ib_qp *qp;
ef5ed416 2965 u16 pkey_index;
2966
2967 attr = kmalloc(sizeof *attr, GFP_KERNEL);
3cd96564 2968 if (!attr) {
2969 dev_err(&port_priv->device->dev,
2970 "Couldn't kmalloc ib_qp_attr\n");
2971 return -ENOMEM;
2972 }
2973
2974 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
2975 IB_DEFAULT_PKEY_FULL, &pkey_index);
2976 if (ret)
2977 pkey_index = 0;
2978
2979 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
2980 qp = port_priv->qp_info[i].qp;
2981 if (!qp)
2982 continue;
2983
2984 /*
2985 * PKey index for QP1 is irrelevant but
2986 * one is needed for the Reset to Init transition
2987 */
2988 attr->qp_state = IB_QPS_INIT;
ef5ed416 2989 attr->pkey_index = pkey_index;
2990 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
2991 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
2992 IB_QP_PKEY_INDEX | IB_QP_QKEY);
2993 if (ret) {
2994 dev_err(&port_priv->device->dev,
2995 "Couldn't change QP%d state to INIT: %d\n",
2996 i, ret);
2997 goto out;
2998 }
2999
3000 attr->qp_state = IB_QPS_RTR;
3001 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3002 if (ret) {
3003 dev_err(&port_priv->device->dev,
3004 "Couldn't change QP%d state to RTR: %d\n",
3005 i, ret);
3006 goto out;
3007 }
3008
3009 attr->qp_state = IB_QPS_RTS;
3010 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3011 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3012 if (ret) {
3013 dev_err(&port_priv->device->dev,
3014 "Couldn't change QP%d state to RTS: %d\n",
3015 i, ret);
3016 goto out;
3017 }
3018 }
3019
3020 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3021 if (ret) {
3022 dev_err(&port_priv->device->dev,
3023 "Failed to request completion notification: %d\n",
3024 ret);
3025 goto out;
3026 }
3027
3028 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3029 if (!port_priv->qp_info[i].qp)
3030 continue;
3031
3032 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3033 if (ret) {
3034 dev_err(&port_priv->device->dev,
3035 "Couldn't post receive WRs\n");
3036 goto out;
3037 }
3038 }
3039out:
3040 kfree(attr);
3041 return ret;
3042}
3043
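/*
 * Editor's note (sketch): ib_mad_port_start() walks each special QP
 * through the standard RESET -> INIT -> RTR -> RTS ladder; only the
 * INIT transition needs a P_Key index and Q_Key, and only the RTS
 * transition needs the send queue PSN.
 */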
3044static void qp_event_handler(struct ib_event *event, void *qp_context)
3045{
3046 struct ib_mad_qp_info *qp_info = qp_context;
3047
3048 /* It's worse than that! He's dead, Jim! */
3049 dev_err(&qp_info->port_priv->device->dev,
3050 "Fatal error (%d) on MAD QP (%d)\n",
3051 event->event, qp_info->qp->qp_num);
3052}
3053
3054static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3055 struct ib_mad_queue *mad_queue)
3056{
3057 mad_queue->qp_info = qp_info;
3058 mad_queue->count = 0;
3059 spin_lock_init(&mad_queue->lock);
3060 INIT_LIST_HEAD(&mad_queue->list);
3061}
3062
3063static void init_mad_qp(struct ib_mad_port_private *port_priv,
3064 struct ib_mad_qp_info *qp_info)
3065{
3066 qp_info->port_priv = port_priv;
3067 init_mad_queue(qp_info, &qp_info->send_queue);
3068 init_mad_queue(qp_info, &qp_info->recv_queue);
3069 INIT_LIST_HEAD(&qp_info->overflow_list);
3070 spin_lock_init(&qp_info->snoop_lock);
3071 qp_info->snoop_table = NULL;
3072 qp_info->snoop_table_size = 0;
3073 atomic_set(&qp_info->snoop_count, 0);
3074}
3075
3076static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3077 enum ib_qp_type qp_type)
3078{
3079 struct ib_qp_init_attr qp_init_attr;
3080 int ret;
3081
3082 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3083 qp_init_attr.send_cq = qp_info->port_priv->cq;
3084 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3085 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3086 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3087 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3088 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3089 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3090 qp_init_attr.qp_type = qp_type;
3091 qp_init_attr.port_num = qp_info->port_priv->port_num;
3092 qp_init_attr.qp_context = qp_info;
3093 qp_init_attr.event_handler = qp_event_handler;
3094 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3095 if (IS_ERR(qp_info->qp)) {
3096 dev_err(&qp_info->port_priv->device->dev,
3097 "Couldn't create ib_mad QP%d\n",
3098 get_spl_qp_index(qp_type));
3099 ret = PTR_ERR(qp_info->qp);
3100 goto error;
3101 }
3102 /* Use minimum queue sizes unless the CQ is resized */
3103 qp_info->send_queue.max_active = mad_sendq_size;
3104 qp_info->recv_queue.max_active = mad_recvq_size;
3105 return 0;
3106
3107error:
3108 return ret;
3109}
3110
3111static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3112{
3113 if (!qp_info->qp)
3114 return;
3115
1da177e4 3116 ib_destroy_qp(qp_info->qp);
6044ec88 3117 kfree(qp_info->snoop_table);
3118}
3119
3120/*
3121 * Open the port
3122 * Create the QP, PD, MR, and CQ if needed
3123 */
3124static int ib_mad_port_open(struct ib_device *device,
3125 int port_num)
3126{
3127 int ret, cq_size;
3128 struct ib_mad_port_private *port_priv;
3129 unsigned long flags;
3130 char name[sizeof "ib_mad123"];
fac70d51 3131 int has_smi;
1da177e4 3132
3133 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3134 return -EFAULT;
3135
3136 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3137 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3138 return -EFAULT;
3139
1da177e4 3140 /* Create new device info */
de6eb66b 3141 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
1da177e4 3142 if (!port_priv) {
7ef5d4b0 3143 dev_err(&device->dev, "No memory for ib_mad_port_private\n");
3144 return -ENOMEM;
3145 }
de6eb66b 3146
3147 port_priv->device = device;
3148 port_priv->port_num = port_num;
3149 spin_lock_init(&port_priv->reg_lock);
3150 INIT_LIST_HEAD(&port_priv->agent_list);
3151 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3152 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3153
fac70d51 3154 cq_size = mad_sendq_size + mad_recvq_size;
29541e3a 3155 has_smi = rdma_cap_ib_smi(device, port_num);
3156 if (has_smi)
3157 cq_size *= 2;
3158
3159 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3160 IB_POLL_WORKQUEUE);
1da177e4 3161 if (IS_ERR(port_priv->cq)) {
7ef5d4b0 3162 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3163 ret = PTR_ERR(port_priv->cq);
3164 goto error3;
3165 }
3166
3167 port_priv->pd = ib_alloc_pd(device);
3168 if (IS_ERR(port_priv->pd)) {
7ef5d4b0 3169 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3170 ret = PTR_ERR(port_priv->pd);
3171 goto error4;
3172 }
3173
3174 if (has_smi) {
3175 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3176 if (ret)
3177 goto error6;
3178 }
3179 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3180 if (ret)
3181 goto error7;
3182
3183 snprintf(name, sizeof name, "ib_mad%d", port_num);
3184 port_priv->wq = create_singlethread_workqueue(name);
3185 if (!port_priv->wq) {
3186 ret = -ENOMEM;
3187 goto error8;
3188 }
1da177e4 3189
3190 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3191 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3192 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3193
3194 ret = ib_mad_port_start(port_priv);
3195 if (ret) {
7ef5d4b0 3196 dev_err(&device->dev, "Couldn't start port\n");
3197 goto error9;
3198 }
3199
3200 return 0;
3201
3202error9:
3203 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3204 list_del_init(&port_priv->port_list);
3205 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3206
3207 destroy_workqueue(port_priv->wq);
3208error8:
3209 destroy_mad_qp(&port_priv->qp_info[1]);
3210error7:
3211 destroy_mad_qp(&port_priv->qp_info[0]);
3212error6:
3213 ib_dealloc_pd(port_priv->pd);
3214error4:
d53e11fd 3215 ib_free_cq(port_priv->cq);
3216 cleanup_recv_queue(&port_priv->qp_info[1]);
3217 cleanup_recv_queue(&port_priv->qp_info[0]);
3218error3:
3219 kfree(port_priv);
3220
3221 return ret;
3222}
3223
3224/*
3225 * Close the port
3226 * If there are no classes using the port, free the port
3227 * resources (CQ, MR, PD, QP) and remove the port's info structure
3228 */
3229static int ib_mad_port_close(struct ib_device *device, int port_num)
3230{
3231 struct ib_mad_port_private *port_priv;
3232 unsigned long flags;
3233
3234 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3235 port_priv = __ib_get_mad_port(device, port_num);
3236 if (port_priv == NULL) {
3237 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
7ef5d4b0 3238 dev_err(&device->dev, "Port %d not found\n", port_num);
3239 return -ENODEV;
3240 }
dc05980d 3241 list_del_init(&port_priv->port_list);
3242 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3243
3244 destroy_workqueue(port_priv->wq);
3245 destroy_mad_qp(&port_priv->qp_info[1]);
3246 destroy_mad_qp(&port_priv->qp_info[0]);
1da177e4 3247 ib_dealloc_pd(port_priv->pd);
d53e11fd 3248 ib_free_cq(port_priv->cq);
3249 cleanup_recv_queue(&port_priv->qp_info[1]);
3250 cleanup_recv_queue(&port_priv->qp_info[0]);
3251 /* XXX: Handle deallocation of MAD registration tables */
3252
3253 kfree(port_priv);
3254
3255 return 0;
3256}
3257
3258static void ib_mad_init_device(struct ib_device *device)
3259{
4139032b 3260 int start, i;
1da177e4 3261
4139032b 3262 start = rdma_start_port(device);
4ab6fb7e 3263
4139032b 3264 for (i = start; i <= rdma_end_port(device); i++) {
c757dea8 3265 if (!rdma_cap_ib_mad(device, i))
3266 continue;
3267
4ab6fb7e 3268 if (ib_mad_port_open(device, i)) {
7ef5d4b0 3269 dev_err(&device->dev, "Couldn't open port %d\n", i);
4ab6fb7e 3270 goto error;
1da177e4 3271 }
4ab6fb7e 3272 if (ib_agent_port_open(device, i)) {
3273 dev_err(&device->dev,
3274 "Couldn't open port %d for agents\n", i);
4ab6fb7e 3275 goto error_agent;
3276 }
3277 }
f68bcc2d 3278 return;
1da177e4 3279
3280error_agent:
3281 if (ib_mad_port_close(device, i))
7ef5d4b0 3282 dev_err(&device->dev, "Couldn't close port %d\n", i);
3283
3284error:
827f2a8b 3285 while (--i >= start) {
c757dea8 3286 if (!rdma_cap_ib_mad(device, i))
827f2a8b 3287 continue;
4ab6fb7e 3288
4ab6fb7e 3289 if (ib_agent_port_close(device, i))
3290 dev_err(&device->dev,
3291 "Couldn't close port %d for agents\n", i);
4ab6fb7e 3292 if (ib_mad_port_close(device, i))
7ef5d4b0 3293 dev_err(&device->dev, "Couldn't close port %d\n", i);
1da177e4 3294 }
3295}
3296
7c1eb45a 3297static void ib_mad_remove_device(struct ib_device *device, void *client_data)
1da177e4 3298{
4139032b 3299 int i;
827f2a8b 3300
4139032b 3301 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
c757dea8 3302 if (!rdma_cap_ib_mad(device, i))
3303 continue;
3304
3305 if (ib_agent_port_close(device, i))
7ef5d4b0 3306 dev_err(&device->dev,
3307 "Couldn't close port %d for agents\n", i);
3308 if (ib_mad_port_close(device, i))
3309 dev_err(&device->dev, "Couldn't close port %d\n", i);
3310 }
3311}
3312
3313static struct ib_client mad_client = {
3314 .name = "mad",
3315 .add = ib_mad_init_device,
3316 .remove = ib_mad_remove_device
3317};
3318
3319static int __init ib_mad_init_module(void)
3320{
3321 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3322 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3323
3324 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3325 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3326
3327 INIT_LIST_HEAD(&ib_mad_port_list);
3328
3329 if (ib_register_client(&mad_client)) {
7ef5d4b0 3330 pr_err("Couldn't register ib_mad client\n");
c9082e51 3331 return -EINVAL;
3332 }
3333
3334 return 0;
3335}
3336
3337static void __exit ib_mad_cleanup_module(void)
3338{
3339 ib_unregister_client(&mad_client);
3340}
3341
3342module_init(ib_mad_init_module);
3343module_exit(ib_mad_cleanup_module);