drivers/net/ethernet/mellanox/mlx4/cmd.c
1 /*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <linux/errno.h>
40
41 #include <linux/mlx4/cmd.h>
42 #include <linux/semaphore.h>
43 #include <rdma/ib_smi.h>
44
45 #include <asm/io.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49
50 #define CMD_POLL_TOKEN 0xffff
51 #define INBOX_MASK 0xffffffffffffff00ULL
52
53 #define CMD_CHAN_VER 1
54 #define CMD_CHAN_IF_REV 1
55
56 enum {
57 /* command completed successfully: */
58 CMD_STAT_OK = 0x00,
59 /* Internal error (such as a bus error) occurred while processing command: */
60 CMD_STAT_INTERNAL_ERR = 0x01,
61 /* Operation/command not supported or opcode modifier not supported: */
62 CMD_STAT_BAD_OP = 0x02,
63 /* Parameter not supported or parameter out of range: */
64 CMD_STAT_BAD_PARAM = 0x03,
65 /* System not enabled or bad system state: */
66 CMD_STAT_BAD_SYS_STATE = 0x04,
67 /* Attempt to access reserved or unallocated resource: */
68 CMD_STAT_BAD_RESOURCE = 0x05,
69 /* Requested resource is currently executing a command, or is otherwise busy: */
70 CMD_STAT_RESOURCE_BUSY = 0x06,
71 /* Required capability exceeds device limits: */
72 CMD_STAT_EXCEED_LIM = 0x08,
73 /* Resource is not in the appropriate state or ownership: */
74 CMD_STAT_BAD_RES_STATE = 0x09,
75 /* Index out of range: */
76 CMD_STAT_BAD_INDEX = 0x0a,
77 /* FW image corrupted: */
78 CMD_STAT_BAD_NVMEM = 0x0b,
79 /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
80 CMD_STAT_ICM_ERROR = 0x0c,
81 /* Attempt to modify a QP/EE which is not in the presumed state: */
82 CMD_STAT_BAD_QP_STATE = 0x10,
83 /* Bad segment parameters (Address/Size): */
84 CMD_STAT_BAD_SEG_PARAM = 0x20,
85 /* Memory Region has Memory Windows bound to: */
86 CMD_STAT_REG_BOUND = 0x21,
87 /* HCA local attached memory not present: */
88 CMD_STAT_LAM_NOT_PRE = 0x22,
89 /* Bad management packet (silently discarded): */
90 CMD_STAT_BAD_PKT = 0x30,
91 /* More outstanding CQEs in CQ than new CQ size: */
92 CMD_STAT_BAD_SIZE = 0x40,
93 /* Multi Function device support required: */
94 CMD_STAT_MULTI_FUNC_REQ = 0x50,
95 };
96
97 enum {
98 HCR_IN_PARAM_OFFSET = 0x00,
99 HCR_IN_MODIFIER_OFFSET = 0x08,
100 HCR_OUT_PARAM_OFFSET = 0x0c,
101 HCR_TOKEN_OFFSET = 0x14,
102 HCR_STATUS_OFFSET = 0x18,
103
104 HCR_OPMOD_SHIFT = 12,
105 HCR_T_BIT = 21,
106 HCR_E_BIT = 22,
107 HCR_GO_BIT = 23
108 };
109
110 enum {
111 GO_BIT_TIMEOUT_MSECS = 10000
112 };
113
114 struct mlx4_cmd_context {
115 struct completion done;
116 int result;
117 int next;
118 u64 out_param;
119 u16 token;
120 u8 fw_status;
121 };
122
123 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
124 struct mlx4_vhcr_cmd *in_vhcr);
125
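/* Translate a firmware command status (CMD_STAT_*) into a negative errno.
 * Unknown or unexpected non-zero statuses map to -EIO.
 */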
126 static int mlx4_status_to_errno(u8 status)
127 {
128 static const int trans_table[] = {
129 [CMD_STAT_INTERNAL_ERR] = -EIO,
130 [CMD_STAT_BAD_OP] = -EPERM,
131 [CMD_STAT_BAD_PARAM] = -EINVAL,
132 [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
133 [CMD_STAT_BAD_RESOURCE] = -EBADF,
134 [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
135 [CMD_STAT_EXCEED_LIM] = -ENOMEM,
136 [CMD_STAT_BAD_RES_STATE] = -EBADF,
137 [CMD_STAT_BAD_INDEX] = -EBADF,
138 [CMD_STAT_BAD_NVMEM] = -EFAULT,
139 [CMD_STAT_ICM_ERROR] = -ENFILE,
140 [CMD_STAT_BAD_QP_STATE] = -EINVAL,
141 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
142 [CMD_STAT_REG_BOUND] = -EBUSY,
143 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
144 [CMD_STAT_BAD_PKT] = -EINVAL,
145 [CMD_STAT_BAD_SIZE] = -ENOMEM,
146 [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
147 };
148
149 if (status >= ARRAY_SIZE(trans_table) ||
150 (status != CMD_STAT_OK && trans_table[status] == 0))
151 return -EIO;
152
153 return trans_table[status];
154 }
155
156 static u8 mlx4_errno_to_status(int errno)
157 {
158 switch (errno) {
159 case -EPERM:
160 return CMD_STAT_BAD_OP;
161 case -EINVAL:
162 return CMD_STAT_BAD_PARAM;
163 case -ENXIO:
164 return CMD_STAT_BAD_SYS_STATE;
165 case -EBUSY:
166 return CMD_STAT_RESOURCE_BUSY;
167 case -ENOMEM:
168 return CMD_STAT_EXCEED_LIM;
169 case -ENFILE:
170 return CMD_STAT_ICM_ERROR;
171 default:
172 return CMD_STAT_INTERNAL_ERR;
173 }
174 }
175
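/* A comm-channel command is still pending as long as the toggle bit in the
 * slave_read word differs from the toggle we last posted through slave_write.
 */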
176 static int comm_pending(struct mlx4_dev *dev)
177 {
178 struct mlx4_priv *priv = mlx4_priv(dev);
179 u32 status = readl(&priv->mfunc.comm->slave_read);
180
181 return (swab32(status) >> 31) != priv->cmd.comm_toggle;
182 }
183
184 static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
185 {
186 struct mlx4_priv *priv = mlx4_priv(dev);
187 u32 val;
188
189 priv->cmd.comm_toggle ^= 1;
190 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
191 __raw_writel((__force u32) cpu_to_be32(val),
192 &priv->mfunc.comm->slave_write);
193 mmiowb();
194 }
195
196 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
197 unsigned long timeout)
198 {
199 struct mlx4_priv *priv = mlx4_priv(dev);
200 unsigned long end;
201 int err = 0;
202 int ret_from_pending = 0;
203
204 /* First, verify that the master reports correct status */
205 if (comm_pending(dev)) {
206 mlx4_warn(dev, "Communication channel is not idle."
207 "my toggle is %d (cmd:0x%x)\n",
208 priv->cmd.comm_toggle, cmd);
209 return -EAGAIN;
210 }
211
212 /* Write command */
213 down(&priv->cmd.poll_sem);
214 mlx4_comm_cmd_post(dev, cmd, param);
215
216 end = msecs_to_jiffies(timeout) + jiffies;
217 while (comm_pending(dev) && time_before(jiffies, end))
218 cond_resched();
219 ret_from_pending = comm_pending(dev);
220 if (ret_from_pending) {
221 /* check if the slave is trying to boot in the middle of
222 * the FLR process. The only non-zero result in the RESET
223 * command is MLX4_DELAY_RESET_SLAVE */
224 if (cmd == MLX4_COMM_CMD_RESET) {
225 err = MLX4_DELAY_RESET_SLAVE;
226 } else {
227 mlx4_warn(dev, "Communication channel timed out\n");
228 err = -ETIMEDOUT;
229 }
230 }
231
232 up(&priv->cmd.poll_sem);
233 return err;
234 }
235
236 static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
237 u16 param, unsigned long timeout)
238 {
239 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
240 struct mlx4_cmd_context *context;
241 unsigned long end;
242 int err = 0;
243
244 down(&cmd->event_sem);
245
246 spin_lock(&cmd->context_lock);
247 BUG_ON(cmd->free_head < 0);
248 context = &cmd->context[cmd->free_head];
249 context->token += cmd->token_mask + 1;
250 cmd->free_head = context->next;
251 spin_unlock(&cmd->context_lock);
252
253 init_completion(&context->done);
254
255 mlx4_comm_cmd_post(dev, op, param);
256
257 if (!wait_for_completion_timeout(&context->done,
258 msecs_to_jiffies(timeout))) {
259 err = -EBUSY;
260 goto out;
261 }
262
263 err = context->result;
264 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
265 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
266 op, context->fw_status);
267 goto out;
268 }
269
270 out:
271 /* wait for comm channel ready
272 * this is necessary to prevent a race
273 * when switching between event and polling mode
274 */
275 end = msecs_to_jiffies(timeout) + jiffies;
276 while (comm_pending(dev) && time_before(jiffies, end))
277 cond_resched();
278
279 spin_lock(&cmd->context_lock);
280 context->next = cmd->free_head;
281 cmd->free_head = context - cmd->context;
282 spin_unlock(&cmd->context_lock);
283
284 up(&cmd->event_sem);
285 return err;
286 }
287
288 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
289 unsigned long timeout)
290 {
291 if (mlx4_priv(dev)->cmd.use_events)
292 return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
293 return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
294 }
295
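/* Non-zero while the HCR still holds an outstanding command: the GO bit is
 * set, or the toggle bit shows firmware has not yet consumed our last post.
 * Returns -EIO if the PCI channel is offline.
 */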
296 static int cmd_pending(struct mlx4_dev *dev)
297 {
298 u32 status;
299
300 if (pci_channel_offline(dev->pdev))
301 return -EIO;
302
303 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
304
305 return (status & swab32(1 << HCR_GO_BIT)) ||
306 (mlx4_priv(dev)->cmd.toggle ==
307 !!(status & swab32(1 << HCR_T_BIT)));
308 }
309
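/* Write one command to the HCR: wait for the GO bit to clear, fill in the
 * parameter/modifier/token words, then set the GO bit (with the event bit
 * when completion is to be reported through an EQ).  Serialized by hcr_mutex.
 */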
310 static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
311 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
312 int event)
313 {
314 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
315 u32 __iomem *hcr = cmd->hcr;
316 int ret = -EAGAIN;
317 unsigned long end;
318
319 mutex_lock(&cmd->hcr_mutex);
320
321 if (pci_channel_offline(dev->pdev)) {
322 /*
323 * Device is going through error recovery
324 * and cannot accept commands.
325 */
326 ret = -EIO;
327 goto out;
328 }
329
330 end = jiffies;
331 if (event)
332 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
333
334 while (cmd_pending(dev)) {
335 if (pci_channel_offline(dev->pdev)) {
336 /*
337 * Device is going through error recovery
338 * and cannot accept commands.
339 */
340 ret = -EIO;
341 goto out;
342 }
343
344 if (time_after_eq(jiffies, end)) {
345 mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
346 goto out;
347 }
348 cond_resched();
349 }
350
351 /*
352 * We use writel (instead of something like memcpy_toio)
353 * because writes of less than 32 bits to the HCR don't work
354 * (and some architectures such as ia64 implement memcpy_toio
355 * in terms of writeb).
356 */
357 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
358 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
359 __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
360 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
361 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
362 __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
363
364 /* __raw_writel may not order writes. */
365 wmb();
366
367 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
368 (cmd->toggle << HCR_T_BIT) |
369 (event ? (1 << HCR_E_BIT) : 0) |
370 (op_modifier << HCR_OPMOD_SHIFT) |
371 op), hcr + 6);
372
373 /*
374 * Make sure that our HCR writes don't get mixed in with
375 * writes from another CPU starting a FW command.
376 */
377 mmiowb();
378
379 cmd->toggle = cmd->toggle ^ 1;
380
381 ret = 0;
382
383 out:
384 mutex_unlock(&cmd->hcr_mutex);
385 return ret;
386 }
387
388 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
389 int out_is_imm, u32 in_modifier, u8 op_modifier,
390 u16 op, unsigned long timeout)
391 {
392 struct mlx4_priv *priv = mlx4_priv(dev);
393 struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
394 int ret;
395
396 mutex_lock(&priv->cmd.slave_cmd_mutex);
397
398 vhcr->in_param = cpu_to_be64(in_param);
399 vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
400 vhcr->in_modifier = cpu_to_be32(in_modifier);
401 vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
402 vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
403 vhcr->status = 0;
404 vhcr->flags = !!(priv->cmd.use_events) << 6;
405
406 if (mlx4_is_master(dev)) {
407 ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
408 if (!ret) {
409 if (out_is_imm) {
410 if (out_param)
411 *out_param =
412 be64_to_cpu(vhcr->out_param);
413 else {
414 mlx4_err(dev, "response expected while"
415 "output mailbox is NULL for "
416 "command 0x%x\n", op);
417 vhcr->status = CMD_STAT_BAD_PARAM;
418 }
419 }
420 ret = mlx4_status_to_errno(vhcr->status);
421 }
422 } else {
423 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
424 MLX4_COMM_TIME + timeout);
425 if (!ret) {
426 if (out_is_imm) {
427 if (out_param)
428 *out_param =
429 be64_to_cpu(vhcr->out_param);
430 else {
431 mlx4_err(dev, "response expected while"
432 "output mailbox is NULL for "
433 "command 0x%x\n", op);
434 vhcr->status = CMD_STAT_BAD_PARAM;
435 }
436 }
437 ret = mlx4_status_to_errno(vhcr->status);
438 } else
439 mlx4_err(dev, "failed execution of VHCR_POST command"
440 "opcode 0x%x\n", op);
441 }
442
443 mutex_unlock(&priv->cmd.slave_cmd_mutex);
444 return ret;
445 }
446
447 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
448 int out_is_imm, u32 in_modifier, u8 op_modifier,
449 u16 op, unsigned long timeout)
450 {
451 struct mlx4_priv *priv = mlx4_priv(dev);
452 void __iomem *hcr = priv->cmd.hcr;
453 int err = 0;
454 unsigned long end;
455 u32 stat;
456
457 down(&priv->cmd.poll_sem);
458
459 if (pci_channel_offline(dev->pdev)) {
460 /*
461 * Device is going through error recovery
462 * and cannot accept commands.
463 */
464 err = -EIO;
465 goto out;
466 }
467
468 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
469 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
470 if (err)
471 goto out;
472
473 end = msecs_to_jiffies(timeout) + jiffies;
474 while (cmd_pending(dev) && time_before(jiffies, end)) {
475 if (pci_channel_offline(dev->pdev)) {
476 /*
477 * Device is going through error recovery
478 * and cannot accept commands.
479 */
480 err = -EIO;
481 goto out;
482 }
483
484 cond_resched();
485 }
486
487 if (cmd_pending(dev)) {
488 err = -ETIMEDOUT;
489 goto out;
490 }
491
492 if (out_is_imm)
493 *out_param =
494 (u64) be32_to_cpu((__force __be32)
495 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
496 (u64) be32_to_cpu((__force __be32)
497 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
498 stat = be32_to_cpu((__force __be32)
499 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
500 err = mlx4_status_to_errno(stat);
501 if (err)
502 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
503 op, stat);
504
505 out:
506 up(&priv->cmd.poll_sem);
507 return err;
508 }
509
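/* Completion handler invoked from the command EQ: match the token to the
 * waiting context (ignoring stale completions of timed-out commands), record
 * the status and immediate out_param, and wake the waiter.
 */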
510 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
511 {
512 struct mlx4_priv *priv = mlx4_priv(dev);
513 struct mlx4_cmd_context *context =
514 &priv->cmd.context[token & priv->cmd.token_mask];
515
516 /* previously timed out command completing at long last */
517 if (token != context->token)
518 return;
519
520 context->fw_status = status;
521 context->result = mlx4_status_to_errno(status);
522 context->out_param = out_param;
523
524 complete(&context->done);
525 }
526
527 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
528 int out_is_imm, u32 in_modifier, u8 op_modifier,
529 u16 op, unsigned long timeout)
530 {
531 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
532 struct mlx4_cmd_context *context;
533 int err = 0;
534
535 down(&cmd->event_sem);
536
537 spin_lock(&cmd->context_lock);
538 BUG_ON(cmd->free_head < 0);
539 context = &cmd->context[cmd->free_head];
540 context->token += cmd->token_mask + 1;
541 cmd->free_head = context->next;
542 spin_unlock(&cmd->context_lock);
543
544 init_completion(&context->done);
545
546 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
547 in_modifier, op_modifier, op, context->token, 1);
548
549 if (!wait_for_completion_timeout(&context->done,
550 msecs_to_jiffies(timeout))) {
551 err = -EBUSY;
552 goto out;
553 }
554
555 err = context->result;
556 if (err) {
557 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
558 op, context->fw_status);
559 goto out;
560 }
561
562 if (out_is_imm)
563 *out_param = context->out_param;
564
565 out:
566 spin_lock(&cmd->context_lock);
567 context->next = cmd->free_head;
568 cmd->free_head = context - cmd->context;
569 spin_unlock(&cmd->context_lock);
570
571 up(&cmd->event_sem);
572 return err;
573 }
574
575 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
576 int out_is_imm, u32 in_modifier, u8 op_modifier,
577 u16 op, unsigned long timeout, int native)
578 {
579 if (pci_channel_offline(dev->pdev))
580 return -EIO;
581
582 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
583 if (mlx4_priv(dev)->cmd.use_events)
584 return mlx4_cmd_wait(dev, in_param, out_param,
585 out_is_imm, in_modifier,
586 op_modifier, op, timeout);
587 else
588 return mlx4_cmd_poll(dev, in_param, out_param,
589 out_is_imm, in_modifier,
590 op_modifier, op, timeout);
591 }
592 return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
593 in_modifier, op_modifier, op, timeout);
594 }
595 EXPORT_SYMBOL_GPL(__mlx4_cmd);
596
597
598 static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
599 {
600 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
601 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
602 }
603
604 static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
605 int slave, u64 slave_addr,
606 int size, int is_read)
607 {
608 u64 in_param;
609 u64 out_param;
610
611 if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
612 (slave & ~0x7f) | (size & 0xff)) {
613 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
614 "master_addr:0x%llx slave_id:%d size:%d\n",
615 slave_addr, master_addr, slave, size);
616 return -EINVAL;
617 }
618
619 if (is_read) {
620 in_param = (u64) slave | slave_addr;
621 out_param = (u64) dev->caps.function | master_addr;
622 } else {
623 in_param = (u64) dev->caps.function | master_addr;
624 out_param = (u64) slave | slave_addr;
625 }
626
627 return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
628 MLX4_CMD_ACCESS_MEM,
629 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
630 }
631
632 static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
633 struct mlx4_cmd_mailbox *inbox,
634 struct mlx4_cmd_mailbox *outbox)
635 {
636 struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
637 struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
638 int err;
639 int i;
640
641 if (index & 0x1f)
642 return -EINVAL;
643
644 in_mad->attr_mod = cpu_to_be32(index / 32);
645
646 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
647 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
648 MLX4_CMD_NATIVE);
649 if (err)
650 return err;
651
652 for (i = 0; i < 32; ++i)
653 pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
654
655 return err;
656 }
657
658 static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
659 struct mlx4_cmd_mailbox *inbox,
660 struct mlx4_cmd_mailbox *outbox)
661 {
662 int i;
663 int err;
664
665 for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
666 err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
667 if (err)
668 return err;
669 }
670
671 return 0;
672 }
673 #define PORT_CAPABILITY_LOCATION_IN_SMP 20
674 #define PORT_STATE_OFFSET 32
675
676 static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
677 {
678 if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
679 return IB_PORT_ACTIVE;
680 else
681 return IB_PORT_DOWN;
682 }
683
684 static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
685 struct mlx4_vhcr *vhcr,
686 struct mlx4_cmd_mailbox *inbox,
687 struct mlx4_cmd_mailbox *outbox,
688 struct mlx4_cmd_info *cmd)
689 {
690 struct ib_smp *smp = inbox->buf;
691 u32 index;
692 u8 port;
693 u16 *table;
694 int err;
695 int vidx, pidx;
696 struct mlx4_priv *priv = mlx4_priv(dev);
697 struct ib_smp *outsmp = outbox->buf;
698 __be16 *outtab = (__be16 *)(outsmp->data);
699 __be32 slave_cap_mask;
700 __be64 slave_node_guid;
701 port = vhcr->in_modifier;
702
703 if (smp->base_version == 1 &&
704 smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
705 smp->class_version == 1) {
706 if (smp->method == IB_MGMT_METHOD_GET) {
707 if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
708 index = be32_to_cpu(smp->attr_mod);
709 if (port < 1 || port > dev->caps.num_ports)
710 return -EINVAL;
711 table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
712 if (!table)
713 return -ENOMEM;
714 /* need to get the full pkey table because the paravirtualized
715 * pkeys may be scattered among several pkey blocks.
716 */
717 err = get_full_pkey_table(dev, port, table, inbox, outbox);
718 if (!err) {
719 for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
720 pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
721 outtab[vidx % 32] = cpu_to_be16(table[pidx]);
722 }
723 }
724 kfree(table);
725 return err;
726 }
727 if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
728 /*get the slave specific caps:*/
729 /*do the command */
730 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
731 vhcr->in_modifier, vhcr->op_modifier,
732 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
733 /* modify the response for slaves */
734 if (!err && slave != mlx4_master_func_num(dev)) {
735 u8 *state = outsmp->data + PORT_STATE_OFFSET;
736
737 *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
738 slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
739 memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
740 }
741 return err;
742 }
743 if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
744 /* compute slave's gid block */
745 smp->attr_mod = cpu_to_be32(slave / 8);
746 /* execute cmd */
747 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
748 vhcr->in_modifier, vhcr->op_modifier,
749 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
750 if (!err) {
751 /* if needed, move slave gid to index 0 */
752 if (slave % 8)
753 memcpy(outsmp->data,
754 outsmp->data + (slave % 8) * 8, 8);
755 /* delete all other gids */
756 memset(outsmp->data + 8, 0, 56);
757 }
758 return err;
759 }
760 if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
761 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
762 vhcr->in_modifier, vhcr->op_modifier,
763 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
764 if (!err) {
765 slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
766 memcpy(outsmp->data + 12, &slave_node_guid, 8);
767 }
768 return err;
769 }
770 }
771 }
772 if (slave != mlx4_master_func_num(dev) &&
773 ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
774 (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
775 smp->method == IB_MGMT_METHOD_SET))) {
776 mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
777 "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
778 slave, smp->method, smp->mgmt_class,
779 be16_to_cpu(smp->attr_id));
780 return -EPERM;
781 }
782 /*default:*/
783 return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
784 vhcr->in_modifier, vhcr->op_modifier,
785 vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
786 }
787
788 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
789 struct mlx4_vhcr *vhcr,
790 struct mlx4_cmd_mailbox *inbox,
791 struct mlx4_cmd_mailbox *outbox,
792 struct mlx4_cmd_info *cmd)
793 {
794 u64 in_param;
795 u64 out_param;
796 int err;
797
798 in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
799 out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
800 if (cmd->encode_slave_id) {
801 in_param &= 0xffffffffffffff00ll;
802 in_param |= slave;
803 }
804
805 err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
806 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
807 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
808
809 if (cmd->out_is_imm)
810 vhcr->out_param = out_param;
811
812 return err;
813 }
814
815 static struct mlx4_cmd_info cmd_info[] = {
816 {
817 .opcode = MLX4_CMD_QUERY_FW,
818 .has_inbox = false,
819 .has_outbox = true,
820 .out_is_imm = false,
821 .encode_slave_id = false,
822 .verify = NULL,
823 .wrapper = mlx4_QUERY_FW_wrapper
824 },
825 {
826 .opcode = MLX4_CMD_QUERY_HCA,
827 .has_inbox = false,
828 .has_outbox = true,
829 .out_is_imm = false,
830 .encode_slave_id = false,
831 .verify = NULL,
832 .wrapper = NULL
833 },
834 {
835 .opcode = MLX4_CMD_QUERY_DEV_CAP,
836 .has_inbox = false,
837 .has_outbox = true,
838 .out_is_imm = false,
839 .encode_slave_id = false,
840 .verify = NULL,
841 .wrapper = mlx4_QUERY_DEV_CAP_wrapper
842 },
843 {
844 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
845 .has_inbox = false,
846 .has_outbox = true,
847 .out_is_imm = false,
848 .encode_slave_id = false,
849 .verify = NULL,
850 .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
851 },
852 {
853 .opcode = MLX4_CMD_QUERY_ADAPTER,
854 .has_inbox = false,
855 .has_outbox = true,
856 .out_is_imm = false,
857 .encode_slave_id = false,
858 .verify = NULL,
859 .wrapper = NULL
860 },
861 {
862 .opcode = MLX4_CMD_INIT_PORT,
863 .has_inbox = false,
864 .has_outbox = false,
865 .out_is_imm = false,
866 .encode_slave_id = false,
867 .verify = NULL,
868 .wrapper = mlx4_INIT_PORT_wrapper
869 },
870 {
871 .opcode = MLX4_CMD_CLOSE_PORT,
872 .has_inbox = false,
873 .has_outbox = false,
874 .out_is_imm = false,
875 .encode_slave_id = false,
876 .verify = NULL,
877 .wrapper = mlx4_CLOSE_PORT_wrapper
878 },
879 {
880 .opcode = MLX4_CMD_QUERY_PORT,
881 .has_inbox = false,
882 .has_outbox = true,
883 .out_is_imm = false,
884 .encode_slave_id = false,
885 .verify = NULL,
886 .wrapper = mlx4_QUERY_PORT_wrapper
887 },
888 {
889 .opcode = MLX4_CMD_SET_PORT,
890 .has_inbox = true,
891 .has_outbox = false,
892 .out_is_imm = false,
893 .encode_slave_id = false,
894 .verify = NULL,
895 .wrapper = mlx4_SET_PORT_wrapper
896 },
897 {
898 .opcode = MLX4_CMD_MAP_EQ,
899 .has_inbox = false,
900 .has_outbox = false,
901 .out_is_imm = false,
902 .encode_slave_id = false,
903 .verify = NULL,
904 .wrapper = mlx4_MAP_EQ_wrapper
905 },
906 {
907 .opcode = MLX4_CMD_SW2HW_EQ,
908 .has_inbox = true,
909 .has_outbox = false,
910 .out_is_imm = false,
911 .encode_slave_id = true,
912 .verify = NULL,
913 .wrapper = mlx4_SW2HW_EQ_wrapper
914 },
915 {
916 .opcode = MLX4_CMD_HW_HEALTH_CHECK,
917 .has_inbox = false,
918 .has_outbox = false,
919 .out_is_imm = false,
920 .encode_slave_id = false,
921 .verify = NULL,
922 .wrapper = NULL
923 },
924 {
925 .opcode = MLX4_CMD_NOP,
926 .has_inbox = false,
927 .has_outbox = false,
928 .out_is_imm = false,
929 .encode_slave_id = false,
930 .verify = NULL,
931 .wrapper = NULL
932 },
933 {
934 .opcode = MLX4_CMD_ALLOC_RES,
935 .has_inbox = false,
936 .has_outbox = false,
937 .out_is_imm = true,
938 .encode_slave_id = false,
939 .verify = NULL,
940 .wrapper = mlx4_ALLOC_RES_wrapper
941 },
942 {
943 .opcode = MLX4_CMD_FREE_RES,
944 .has_inbox = false,
945 .has_outbox = false,
946 .out_is_imm = false,
947 .encode_slave_id = false,
948 .verify = NULL,
949 .wrapper = mlx4_FREE_RES_wrapper
950 },
951 {
952 .opcode = MLX4_CMD_SW2HW_MPT,
953 .has_inbox = true,
954 .has_outbox = false,
955 .out_is_imm = false,
956 .encode_slave_id = true,
957 .verify = NULL,
958 .wrapper = mlx4_SW2HW_MPT_wrapper
959 },
960 {
961 .opcode = MLX4_CMD_QUERY_MPT,
962 .has_inbox = false,
963 .has_outbox = true,
964 .out_is_imm = false,
965 .encode_slave_id = false,
966 .verify = NULL,
967 .wrapper = mlx4_QUERY_MPT_wrapper
968 },
969 {
970 .opcode = MLX4_CMD_HW2SW_MPT,
971 .has_inbox = false,
972 .has_outbox = false,
973 .out_is_imm = false,
974 .encode_slave_id = false,
975 .verify = NULL,
976 .wrapper = mlx4_HW2SW_MPT_wrapper
977 },
978 {
979 .opcode = MLX4_CMD_READ_MTT,
980 .has_inbox = false,
981 .has_outbox = true,
982 .out_is_imm = false,
983 .encode_slave_id = false,
984 .verify = NULL,
985 .wrapper = NULL
986 },
987 {
988 .opcode = MLX4_CMD_WRITE_MTT,
989 .has_inbox = true,
990 .has_outbox = false,
991 .out_is_imm = false,
992 .encode_slave_id = false,
993 .verify = NULL,
994 .wrapper = mlx4_WRITE_MTT_wrapper
995 },
996 {
997 .opcode = MLX4_CMD_SYNC_TPT,
998 .has_inbox = true,
999 .has_outbox = false,
1000 .out_is_imm = false,
1001 .encode_slave_id = false,
1002 .verify = NULL,
1003 .wrapper = NULL
1004 },
1005 {
1006 .opcode = MLX4_CMD_HW2SW_EQ,
1007 .has_inbox = false,
1008 .has_outbox = true,
1009 .out_is_imm = false,
1010 .encode_slave_id = true,
1011 .verify = NULL,
1012 .wrapper = mlx4_HW2SW_EQ_wrapper
1013 },
1014 {
1015 .opcode = MLX4_CMD_QUERY_EQ,
1016 .has_inbox = false,
1017 .has_outbox = true,
1018 .out_is_imm = false,
1019 .encode_slave_id = true,
1020 .verify = NULL,
1021 .wrapper = mlx4_QUERY_EQ_wrapper
1022 },
1023 {
1024 .opcode = MLX4_CMD_SW2HW_CQ,
1025 .has_inbox = true,
1026 .has_outbox = false,
1027 .out_is_imm = false,
1028 .encode_slave_id = true,
1029 .verify = NULL,
1030 .wrapper = mlx4_SW2HW_CQ_wrapper
1031 },
1032 {
1033 .opcode = MLX4_CMD_HW2SW_CQ,
1034 .has_inbox = false,
1035 .has_outbox = false,
1036 .out_is_imm = false,
1037 .encode_slave_id = false,
1038 .verify = NULL,
1039 .wrapper = mlx4_HW2SW_CQ_wrapper
1040 },
1041 {
1042 .opcode = MLX4_CMD_QUERY_CQ,
1043 .has_inbox = false,
1044 .has_outbox = true,
1045 .out_is_imm = false,
1046 .encode_slave_id = false,
1047 .verify = NULL,
1048 .wrapper = mlx4_QUERY_CQ_wrapper
1049 },
1050 {
1051 .opcode = MLX4_CMD_MODIFY_CQ,
1052 .has_inbox = true,
1053 .has_outbox = false,
1054 .out_is_imm = true,
1055 .encode_slave_id = false,
1056 .verify = NULL,
1057 .wrapper = mlx4_MODIFY_CQ_wrapper
1058 },
1059 {
1060 .opcode = MLX4_CMD_SW2HW_SRQ,
1061 .has_inbox = true,
1062 .has_outbox = false,
1063 .out_is_imm = false,
1064 .encode_slave_id = true,
1065 .verify = NULL,
1066 .wrapper = mlx4_SW2HW_SRQ_wrapper
1067 },
1068 {
1069 .opcode = MLX4_CMD_HW2SW_SRQ,
1070 .has_inbox = false,
1071 .has_outbox = false,
1072 .out_is_imm = false,
1073 .encode_slave_id = false,
1074 .verify = NULL,
1075 .wrapper = mlx4_HW2SW_SRQ_wrapper
1076 },
1077 {
1078 .opcode = MLX4_CMD_QUERY_SRQ,
1079 .has_inbox = false,
1080 .has_outbox = true,
1081 .out_is_imm = false,
1082 .encode_slave_id = false,
1083 .verify = NULL,
1084 .wrapper = mlx4_QUERY_SRQ_wrapper
1085 },
1086 {
1087 .opcode = MLX4_CMD_ARM_SRQ,
1088 .has_inbox = false,
1089 .has_outbox = false,
1090 .out_is_imm = false,
1091 .encode_slave_id = false,
1092 .verify = NULL,
1093 .wrapper = mlx4_ARM_SRQ_wrapper
1094 },
1095 {
1096 .opcode = MLX4_CMD_RST2INIT_QP,
1097 .has_inbox = true,
1098 .has_outbox = false,
1099 .out_is_imm = false,
1100 .encode_slave_id = true,
1101 .verify = NULL,
1102 .wrapper = mlx4_RST2INIT_QP_wrapper
1103 },
1104 {
1105 .opcode = MLX4_CMD_INIT2INIT_QP,
1106 .has_inbox = true,
1107 .has_outbox = false,
1108 .out_is_imm = false,
1109 .encode_slave_id = false,
1110 .verify = NULL,
1111 .wrapper = mlx4_INIT2INIT_QP_wrapper
1112 },
1113 {
1114 .opcode = MLX4_CMD_INIT2RTR_QP,
1115 .has_inbox = true,
1116 .has_outbox = false,
1117 .out_is_imm = false,
1118 .encode_slave_id = false,
1119 .verify = NULL,
1120 .wrapper = mlx4_INIT2RTR_QP_wrapper
1121 },
1122 {
1123 .opcode = MLX4_CMD_RTR2RTS_QP,
1124 .has_inbox = true,
1125 .has_outbox = false,
1126 .out_is_imm = false,
1127 .encode_slave_id = false,
1128 .verify = NULL,
1129 .wrapper = mlx4_RTR2RTS_QP_wrapper
1130 },
1131 {
1132 .opcode = MLX4_CMD_RTS2RTS_QP,
1133 .has_inbox = true,
1134 .has_outbox = false,
1135 .out_is_imm = false,
1136 .encode_slave_id = false,
1137 .verify = NULL,
1138 .wrapper = mlx4_RTS2RTS_QP_wrapper
1139 },
1140 {
1141 .opcode = MLX4_CMD_SQERR2RTS_QP,
1142 .has_inbox = true,
1143 .has_outbox = false,
1144 .out_is_imm = false,
1145 .encode_slave_id = false,
1146 .verify = NULL,
1147 .wrapper = mlx4_SQERR2RTS_QP_wrapper
1148 },
1149 {
1150 .opcode = MLX4_CMD_2ERR_QP,
1151 .has_inbox = false,
1152 .has_outbox = false,
1153 .out_is_imm = false,
1154 .encode_slave_id = false,
1155 .verify = NULL,
1156 .wrapper = mlx4_GEN_QP_wrapper
1157 },
1158 {
1159 .opcode = MLX4_CMD_RTS2SQD_QP,
1160 .has_inbox = false,
1161 .has_outbox = false,
1162 .out_is_imm = false,
1163 .encode_slave_id = false,
1164 .verify = NULL,
1165 .wrapper = mlx4_GEN_QP_wrapper
1166 },
1167 {
1168 .opcode = MLX4_CMD_SQD2SQD_QP,
1169 .has_inbox = true,
1170 .has_outbox = false,
1171 .out_is_imm = false,
1172 .encode_slave_id = false,
1173 .verify = NULL,
1174 .wrapper = mlx4_SQD2SQD_QP_wrapper
1175 },
1176 {
1177 .opcode = MLX4_CMD_SQD2RTS_QP,
1178 .has_inbox = true,
1179 .has_outbox = false,
1180 .out_is_imm = false,
1181 .encode_slave_id = false,
1182 .verify = NULL,
1183 .wrapper = mlx4_SQD2RTS_QP_wrapper
1184 },
1185 {
1186 .opcode = MLX4_CMD_2RST_QP,
1187 .has_inbox = false,
1188 .has_outbox = false,
1189 .out_is_imm = false,
1190 .encode_slave_id = false,
1191 .verify = NULL,
1192 .wrapper = mlx4_2RST_QP_wrapper
1193 },
1194 {
1195 .opcode = MLX4_CMD_QUERY_QP,
1196 .has_inbox = false,
1197 .has_outbox = true,
1198 .out_is_imm = false,
1199 .encode_slave_id = false,
1200 .verify = NULL,
1201 .wrapper = mlx4_GEN_QP_wrapper
1202 },
1203 {
1204 .opcode = MLX4_CMD_SUSPEND_QP,
1205 .has_inbox = false,
1206 .has_outbox = false,
1207 .out_is_imm = false,
1208 .encode_slave_id = false,
1209 .verify = NULL,
1210 .wrapper = mlx4_GEN_QP_wrapper
1211 },
1212 {
1213 .opcode = MLX4_CMD_UNSUSPEND_QP,
1214 .has_inbox = false,
1215 .has_outbox = false,
1216 .out_is_imm = false,
1217 .encode_slave_id = false,
1218 .verify = NULL,
1219 .wrapper = mlx4_GEN_QP_wrapper
1220 },
1221 {
1222 .opcode = MLX4_CMD_CONF_SPECIAL_QP,
1223 .has_inbox = false,
1224 .has_outbox = false,
1225 .out_is_imm = false,
1226 .encode_slave_id = false,
1227 .verify = NULL, /* XXX verify: only demux can do this */
1228 .wrapper = NULL
1229 },
1230 {
1231 .opcode = MLX4_CMD_MAD_IFC,
1232 .has_inbox = true,
1233 .has_outbox = true,
1234 .out_is_imm = false,
1235 .encode_slave_id = false,
1236 .verify = NULL,
1237 .wrapper = mlx4_MAD_IFC_wrapper
1238 },
1239 {
1240 .opcode = MLX4_CMD_QUERY_IF_STAT,
1241 .has_inbox = false,
1242 .has_outbox = true,
1243 .out_is_imm = false,
1244 .encode_slave_id = false,
1245 .verify = NULL,
1246 .wrapper = mlx4_QUERY_IF_STAT_wrapper
1247 },
1248 /* Native multicast commands are not available for guests */
1249 {
1250 .opcode = MLX4_CMD_QP_ATTACH,
1251 .has_inbox = true,
1252 .has_outbox = false,
1253 .out_is_imm = false,
1254 .encode_slave_id = false,
1255 .verify = NULL,
1256 .wrapper = mlx4_QP_ATTACH_wrapper
1257 },
1258 {
1259 .opcode = MLX4_CMD_PROMISC,
1260 .has_inbox = false,
1261 .has_outbox = false,
1262 .out_is_imm = false,
1263 .encode_slave_id = false,
1264 .verify = NULL,
1265 .wrapper = mlx4_PROMISC_wrapper
1266 },
1267 /* Ethernet specific commands */
1268 {
1269 .opcode = MLX4_CMD_SET_VLAN_FLTR,
1270 .has_inbox = true,
1271 .has_outbox = false,
1272 .out_is_imm = false,
1273 .encode_slave_id = false,
1274 .verify = NULL,
1275 .wrapper = mlx4_SET_VLAN_FLTR_wrapper
1276 },
1277 {
1278 .opcode = MLX4_CMD_SET_MCAST_FLTR,
1279 .has_inbox = false,
1280 .has_outbox = false,
1281 .out_is_imm = false,
1282 .encode_slave_id = false,
1283 .verify = NULL,
1284 .wrapper = mlx4_SET_MCAST_FLTR_wrapper
1285 },
1286 {
1287 .opcode = MLX4_CMD_DUMP_ETH_STATS,
1288 .has_inbox = false,
1289 .has_outbox = true,
1290 .out_is_imm = false,
1291 .encode_slave_id = false,
1292 .verify = NULL,
1293 .wrapper = mlx4_DUMP_ETH_STATS_wrapper
1294 },
1295 {
1296 .opcode = MLX4_CMD_INFORM_FLR_DONE,
1297 .has_inbox = false,
1298 .has_outbox = false,
1299 .out_is_imm = false,
1300 .encode_slave_id = false,
1301 .verify = NULL,
1302 .wrapper = NULL
1303 },
1304 /* flow steering commands */
1305 {
1306 .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1307 .has_inbox = true,
1308 .has_outbox = false,
1309 .out_is_imm = true,
1310 .encode_slave_id = false,
1311 .verify = NULL,
1312 .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1313 },
1314 {
1315 .opcode = MLX4_QP_FLOW_STEERING_DETACH,
1316 .has_inbox = false,
1317 .has_outbox = false,
1318 .out_is_imm = false,
1319 .encode_slave_id = false,
1320 .verify = NULL,
1321 .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1322 },
1323 };
1324
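/* Master-side execution of a slave's virtual HCR: DMA the vHCR from the
 * slave (unless one was passed in), look up the opcode in cmd_info, copy in
 * the inbox if needed, run the wrapper or forward the command natively to
 * firmware, then write the outbox and status back to the slave.
 */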
1325 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1326 struct mlx4_vhcr_cmd *in_vhcr)
1327 {
1328 struct mlx4_priv *priv = mlx4_priv(dev);
1329 struct mlx4_cmd_info *cmd = NULL;
1330 struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1331 struct mlx4_vhcr *vhcr;
1332 struct mlx4_cmd_mailbox *inbox = NULL;
1333 struct mlx4_cmd_mailbox *outbox = NULL;
1334 u64 in_param;
1335 u64 out_param;
1336 int ret = 0;
1337 int i;
1338 int err = 0;
1339
1340 /* Create sw representation of Virtual HCR */
1341 vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1342 if (!vhcr)
1343 return -ENOMEM;
1344
1345 /* DMA in the vHCR */
1346 if (!in_vhcr) {
1347 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1348 priv->mfunc.master.slave_state[slave].vhcr_dma,
1349 ALIGN(sizeof(struct mlx4_vhcr_cmd),
1350 MLX4_ACCESS_MEM_ALIGN), 1);
1351 if (ret) {
1352 mlx4_err(dev, "%s:Failed reading vhcr"
1353 "ret: 0x%x\n", __func__, ret);
1354 kfree(vhcr);
1355 return ret;
1356 }
1357 }
1358
1359 /* Fill SW VHCR fields */
1360 vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1361 vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1362 vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1363 vhcr->token = be16_to_cpu(vhcr_cmd->token);
1364 vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1365 vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1366 vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1367
1368 /* Lookup command */
1369 for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1370 if (vhcr->op == cmd_info[i].opcode) {
1371 cmd = &cmd_info[i];
1372 break;
1373 }
1374 }
1375 if (!cmd) {
1376 mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1377 vhcr->op, slave);
1378 vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1379 goto out_status;
1380 }
1381
1382 /* Read inbox */
1383 if (cmd->has_inbox) {
1384 vhcr->in_param &= INBOX_MASK;
1385 inbox = mlx4_alloc_cmd_mailbox(dev);
1386 if (IS_ERR(inbox)) {
1387 vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1388 inbox = NULL;
1389 goto out_status;
1390 }
1391
1392 if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1393 vhcr->in_param,
1394 MLX4_MAILBOX_SIZE, 1)) {
1395 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1396 __func__, cmd->opcode);
1397 vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1398 goto out_status;
1399 }
1400 }
1401
1402 /* Apply permission and bound checks if applicable */
1403 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1404 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
1405 "checks for resource_id:%d\n", vhcr->op, slave,
1406 vhcr->in_modifier);
1407 vhcr_cmd->status = CMD_STAT_BAD_OP;
1408 goto out_status;
1409 }
1410
1411 /* Allocate outbox */
1412 if (cmd->has_outbox) {
1413 outbox = mlx4_alloc_cmd_mailbox(dev);
1414 if (IS_ERR(outbox)) {
1415 vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1416 outbox = NULL;
1417 goto out_status;
1418 }
1419 }
1420
1421 /* Execute the command! */
1422 if (cmd->wrapper) {
1423 err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1424 cmd);
1425 if (cmd->out_is_imm)
1426 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1427 } else {
1428 in_param = cmd->has_inbox ? (u64) inbox->dma :
1429 vhcr->in_param;
1430 out_param = cmd->has_outbox ? (u64) outbox->dma :
1431 vhcr->out_param;
1432 err = __mlx4_cmd(dev, in_param, &out_param,
1433 cmd->out_is_imm, vhcr->in_modifier,
1434 vhcr->op_modifier, vhcr->op,
1435 MLX4_CMD_TIME_CLASS_A,
1436 MLX4_CMD_NATIVE);
1437
1438 if (cmd->out_is_imm) {
1439 vhcr->out_param = out_param;
1440 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1441 }
1442 }
1443
1444 if (err) {
1445 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
1446 " error:%d, status %d\n",
1447 vhcr->op, slave, vhcr->errno, err);
1448 vhcr_cmd->status = mlx4_errno_to_status(err);
1449 goto out_status;
1450 }
1451
1452
1453 /* Write outbox if command completed successfully */
1454 if (cmd->has_outbox && !vhcr_cmd->status) {
1455 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1456 vhcr->out_param,
1457 MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1458 if (ret) {
1459 /* If we failed to write back the outbox after the
1460 * command was successfully executed, we must fail this
1461 * slave, as it is now in an undefined state */
1462 mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1463 goto out;
1464 }
1465 }
1466
1467 out_status:
1468 /* DMA back vhcr result */
1469 if (!in_vhcr) {
1470 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1471 priv->mfunc.master.slave_state[slave].vhcr_dma,
1472 ALIGN(sizeof(struct mlx4_vhcr),
1473 MLX4_ACCESS_MEM_ALIGN),
1474 MLX4_CMD_WRAPPED);
1475 if (ret)
1476 mlx4_err(dev, "%s:Failed writing vhcr result\n",
1477 __func__);
1478 else if (vhcr->e_bit &&
1479 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1480 mlx4_warn(dev, "Failed to generate command completion "
1481 "eqe for slave %d\n", slave);
1482 }
1483
1484 out:
1485 kfree(vhcr);
1486 mlx4_free_cmd_mailbox(dev, inbox);
1487 mlx4_free_cmd_mailbox(dev, outbox);
1488 return ret;
1489 }
1490
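/* Copy the administrative vport settings into the operational state when a
 * slave comes up, registering its default VLAN and (when spoof checking is
 * enabled) its MAC with the hardware tables.
 */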
1491 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1492 {
1493 int port, err;
1494 struct mlx4_vport_state *vp_admin;
1495 struct mlx4_vport_oper_state *vp_oper;
1496
1497 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1498 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1499 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1500 vp_oper->state = *vp_admin;
1501 if (MLX4_VGT != vp_admin->default_vlan) {
1502 err = __mlx4_register_vlan(&priv->dev, port,
1503 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1504 if (err) {
1505 vp_oper->vlan_idx = NO_INDX;
1506 mlx4_warn((&priv->dev),
1507 "No vlan resorces slave %d, port %d\n",
1508 slave, port);
1509 return err;
1510 }
1511 mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
1512 (int)(vp_oper->state.default_vlan),
1513 vp_oper->vlan_idx, slave, port);
1514 }
1515 if (vp_admin->spoofchk) {
1516 vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
1517 port,
1518 vp_admin->mac);
1519 if (0 > vp_oper->mac_idx) {
1520 err = vp_oper->mac_idx;
1521 vp_oper->mac_idx = NO_INDX;
1522 mlx4_warn((&priv->dev),
1523 "No mac resorces slave %d, port %d\n",
1524 slave, port);
1525 return err;
1526 }
1527 mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
1528 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1529 }
1530 }
1531 return 0;
1532 }
1533
1534 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1535 {
1536 int port;
1537 struct mlx4_vport_oper_state *vp_oper;
1538
1539 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1540 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1541 if (NO_INDX != vp_oper->vlan_idx) {
1542 __mlx4_unregister_vlan(&priv->dev,
1543 port, vp_oper->vlan_idx);
1544 vp_oper->vlan_idx = NO_INDX;
1545 }
1546 if (NO_INDX != vp_oper->mac_idx) {
1547 __mlx4_unregister_mac(&priv->dev, port, vp_oper->mac_idx);
1548 vp_oper->mac_idx = NO_INDX;
1549 }
1550 }
1551 return;
1552 }
1553
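/* Handle a single comm-channel command from a slave.  The VHCR0..VHCR_EN
 * sequence builds up the slave's vHCR DMA address, VHCR_POST executes the
 * posted command, and RESET tears the slave down; any protocol violation
 * resets the slave.
 */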
1554 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
1555 u16 param, u8 toggle)
1556 {
1557 struct mlx4_priv *priv = mlx4_priv(dev);
1558 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1559 u32 reply;
1560 u8 is_going_down = 0;
1561 int i;
1562 unsigned long flags;
1563
1564 slave_state[slave].comm_toggle ^= 1;
1565 reply = (u32) slave_state[slave].comm_toggle << 31;
1566 if (toggle != slave_state[slave].comm_toggle) {
1567 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
1568 "STATE COMPROMISIED ***\n", toggle, slave);
1569 goto reset_slave;
1570 }
1571 if (cmd == MLX4_COMM_CMD_RESET) {
1572 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
1573 slave_state[slave].active = false;
1574 mlx4_master_deactivate_admin_state(priv, slave);
1575 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
1576 slave_state[slave].event_eq[i].eqn = -1;
1577 slave_state[slave].event_eq[i].token = 0;
1578 }
1579 /* check if we are in the middle of FLR process,
1580 * if so return "retry" status to the slave */
1581 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
1582 goto inform_slave_state;
1583
1584 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
1585
1586 /* write the version in the event field */
1587 reply |= mlx4_comm_get_version();
1588
1589 goto reset_slave;
1590 }
1591 /*command from slave in the middle of FLR*/
1592 if (cmd != MLX4_COMM_CMD_RESET &&
1593 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
1594 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
1595 "in the middle of FLR\n", slave, cmd);
1596 return;
1597 }
1598
1599 switch (cmd) {
1600 case MLX4_COMM_CMD_VHCR0:
1601 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
1602 goto reset_slave;
1603 slave_state[slave].vhcr_dma = ((u64) param) << 48;
1604 priv->mfunc.master.slave_state[slave].cookie = 0;
1605 mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
1606 break;
1607 case MLX4_COMM_CMD_VHCR1:
1608 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
1609 goto reset_slave;
1610 slave_state[slave].vhcr_dma |= ((u64) param) << 32;
1611 break;
1612 case MLX4_COMM_CMD_VHCR2:
1613 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
1614 goto reset_slave;
1615 slave_state[slave].vhcr_dma |= ((u64) param) << 16;
1616 break;
1617 case MLX4_COMM_CMD_VHCR_EN:
1618 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
1619 goto reset_slave;
1620 slave_state[slave].vhcr_dma |= param;
1621 if (mlx4_master_activate_admin_state(priv, slave))
1622 goto reset_slave;
1623 slave_state[slave].active = true;
1624 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
1625 break;
1626 case MLX4_COMM_CMD_VHCR_POST:
1627 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
1628 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
1629 goto reset_slave;
1630
1631 mutex_lock(&priv->cmd.slave_cmd_mutex);
1632 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
1633 mlx4_err(dev, "Failed processing vhcr for slave:%d,"
1634 " resetting slave.\n", slave);
1635 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1636 goto reset_slave;
1637 }
1638 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1639 break;
1640 default:
1641 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
1642 goto reset_slave;
1643 }
1644 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
1645 if (!slave_state[slave].is_slave_going_down)
1646 slave_state[slave].last_cmd = cmd;
1647 else
1648 is_going_down = 1;
1649 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1650 if (is_going_down) {
1651 mlx4_warn(dev, "Slave is going down aborting command(%d)"
1652 " executing from slave:%d\n",
1653 cmd, slave);
1654 return;
1655 }
1656 __raw_writel((__force u32) cpu_to_be32(reply),
1657 &priv->mfunc.comm[slave].slave_read);
1658 mmiowb();
1659
1660 return;
1661
1662 reset_slave:
1663 /* cleanup any slave resources */
1664 mlx4_delete_all_resources_for_slave(dev, slave);
1665 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
1666 if (!slave_state[slave].is_slave_going_down)
1667 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
1668 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
1669 /* with slave in the middle of FLR, no need to clean resources again */
1670 inform_slave_state:
1671 memset(&slave_state[slave].event_eq, 0,
1672 sizeof(struct mlx4_slave_event_eq_info));
1673 __raw_writel((__force u32) cpu_to_be32(reply),
1674 &priv->mfunc.comm[slave].slave_read);
1675 wmb();
1676 }
1677
1678 /* master command processing */
1679 void mlx4_master_comm_channel(struct work_struct *work)
1680 {
1681 struct mlx4_mfunc_master_ctx *master =
1682 container_of(work,
1683 struct mlx4_mfunc_master_ctx,
1684 comm_work);
1685 struct mlx4_mfunc *mfunc =
1686 container_of(master, struct mlx4_mfunc, master);
1687 struct mlx4_priv *priv =
1688 container_of(mfunc, struct mlx4_priv, mfunc);
1689 struct mlx4_dev *dev = &priv->dev;
1690 __be32 *bit_vec;
1691 u32 comm_cmd;
1692 u32 vec;
1693 int i, j, slave;
1694 int toggle;
1695 int served = 0;
1696 int reported = 0;
1697 u32 slt;
1698
1699 bit_vec = master->comm_arm_bit_vector;
1700 for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
1701 vec = be32_to_cpu(bit_vec[i]);
1702 for (j = 0; j < 32; j++) {
1703 if (!(vec & (1 << j)))
1704 continue;
1705 ++reported;
1706 slave = (i * 32) + j;
1707 comm_cmd = swab32(readl(
1708 &mfunc->comm[slave].slave_write));
1709 slt = swab32(readl(&mfunc->comm[slave].slave_read))
1710 >> 31;
1711 toggle = comm_cmd >> 31;
1712 if (toggle != slt) {
1713 if (master->slave_state[slave].comm_toggle
1714 != slt) {
1715 printk(KERN_INFO "slave %d out of sync."
1716 " read toggle %d, state toggle %d. "
1717 "Resynching.\n", slave, slt,
1718 master->slave_state[slave].comm_toggle);
1719 master->slave_state[slave].comm_toggle =
1720 slt;
1721 }
1722 mlx4_master_do_cmd(dev, slave,
1723 comm_cmd >> 16 & 0xff,
1724 comm_cmd & 0xffff, toggle);
1725 ++served;
1726 }
1727 }
1728 }
1729
1730 if (reported && reported != served)
1731 mlx4_warn(dev, "Got command event with bitmask from %d slaves"
1732 " but %d were served\n",
1733 reported, served);
1734
1735 if (mlx4_ARM_COMM_CHANNEL(dev))
1736 mlx4_warn(dev, "Failed to arm comm channel events\n");
1737 }
1738
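/* Slave-side synchronization with the master's comm-channel toggle: wait up
 * to five seconds for slave_read to match slave_write; if it never does,
 * assume a previous VM left the channel unsynced and reset both to zero.
 */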
1739 static int sync_toggles(struct mlx4_dev *dev)
1740 {
1741 struct mlx4_priv *priv = mlx4_priv(dev);
1742 int wr_toggle;
1743 int rd_toggle;
1744 unsigned long end;
1745
1746 wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
1747 end = jiffies + msecs_to_jiffies(5000);
1748
1749 while (time_before(jiffies, end)) {
1750 rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
1751 if (rd_toggle == wr_toggle) {
1752 priv->cmd.comm_toggle = rd_toggle;
1753 return 0;
1754 }
1755
1756 cond_resched();
1757 }
1758
1759 /*
1760 * we could reach here if for example the previous VM using this
1761 * function misbehaved and left the channel with unsynced state. We
1762 * should fix this here and give this VM a chance to use a properly
1763 * synced channel
1764 */
1765 mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
1766 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
1767 __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
1768 priv->cmd.comm_toggle = 0;
1769
1770 return 0;
1771 }
1772
1773 int mlx4_multi_func_init(struct mlx4_dev *dev)
1774 {
1775 struct mlx4_priv *priv = mlx4_priv(dev);
1776 struct mlx4_slave_state *s_state;
1777 int i, j, err, port;
1778
1779 if (mlx4_is_master(dev))
1780 priv->mfunc.comm =
1781 ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
1782 priv->fw.comm_base, MLX4_COMM_PAGESIZE);
1783 else
1784 priv->mfunc.comm =
1785 ioremap(pci_resource_start(dev->pdev, 2) +
1786 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
1787 if (!priv->mfunc.comm) {
1788 mlx4_err(dev, "Couldn't map communication vector.\n");
1789 goto err_vhcr;
1790 }
1791
1792 if (mlx4_is_master(dev)) {
1793 priv->mfunc.master.slave_state =
1794 kzalloc(dev->num_slaves *
1795 sizeof(struct mlx4_slave_state), GFP_KERNEL);
1796 if (!priv->mfunc.master.slave_state)
1797 goto err_comm;
1798
1799 priv->mfunc.master.vf_admin =
1800 kzalloc(dev->num_slaves *
1801 sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
1802 if (!priv->mfunc.master.vf_admin)
1803 goto err_comm_admin;
1804
1805 priv->mfunc.master.vf_oper =
1806 kzalloc(dev->num_slaves *
1807 sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
1808 if (!priv->mfunc.master.vf_oper)
1809 goto err_comm_oper;
1810
1811 for (i = 0; i < dev->num_slaves; ++i) {
1812 s_state = &priv->mfunc.master.slave_state[i];
1813 s_state->last_cmd = MLX4_COMM_CMD_RESET;
1814 for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
1815 s_state->event_eq[j].eqn = -1;
1816 __raw_writel((__force u32) 0,
1817 &priv->mfunc.comm[i].slave_write);
1818 __raw_writel((__force u32) 0,
1819 &priv->mfunc.comm[i].slave_read);
1820 mmiowb();
1821 for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1822 s_state->vlan_filter[port] =
1823 kzalloc(sizeof(struct mlx4_vlan_fltr),
1824 GFP_KERNEL);
1825 if (!s_state->vlan_filter[port]) {
1826 if (--port)
1827 kfree(s_state->vlan_filter[port]);
1828 goto err_slaves;
1829 }
1830 INIT_LIST_HEAD(&s_state->mcast_filters[port]);
1831 priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
1832 priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
1833 priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
1834 priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
1835 }
1836 spin_lock_init(&s_state->lock);
1837 }
1838
1839 memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
1840 priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
1841 INIT_WORK(&priv->mfunc.master.comm_work,
1842 mlx4_master_comm_channel);
1843 INIT_WORK(&priv->mfunc.master.slave_event_work,
1844 mlx4_gen_slave_eqe);
1845 INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
1846 mlx4_master_handle_slave_flr);
1847 spin_lock_init(&priv->mfunc.master.slave_state_lock);
1848 spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
1849 priv->mfunc.master.comm_wq =
1850 create_singlethread_workqueue("mlx4_comm");
1851 if (!priv->mfunc.master.comm_wq)
1852 goto err_slaves;
1853
1854 if (mlx4_init_resource_tracker(dev))
1855 goto err_thread;
1856
1857 err = mlx4_ARM_COMM_CHANNEL(dev);
1858 if (err) {
1859 mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
1860 err);
1861 goto err_resource;
1862 }
1863
1864 } else {
1865 err = sync_toggles(dev);
1866 if (err) {
1867 mlx4_err(dev, "Couldn't sync toggles\n");
1868 goto err_comm;
1869 }
1870 }
1871 return 0;
1872
1873 err_resource:
1874 mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
1875 err_thread:
1876 flush_workqueue(priv->mfunc.master.comm_wq);
1877 destroy_workqueue(priv->mfunc.master.comm_wq);
1878 err_slaves:
1879 while (--i) {
1880 for (port = 1; port <= MLX4_MAX_PORTS; port++)
1881 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1882 }
1883 kfree(priv->mfunc.master.vf_oper);
1884 err_comm_oper:
1885 kfree(priv->mfunc.master.vf_admin);
1886 err_comm_admin:
1887 kfree(priv->mfunc.master.slave_state);
1888 err_comm:
1889 iounmap(priv->mfunc.comm);
1890 err_vhcr:
1891 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1892 priv->mfunc.vhcr,
1893 priv->mfunc.vhcr_dma);
1894 priv->mfunc.vhcr = NULL;
1895 return -ENOMEM;
1896 }
1897
1898 int mlx4_cmd_init(struct mlx4_dev *dev)
1899 {
1900 struct mlx4_priv *priv = mlx4_priv(dev);
1901
1902 mutex_init(&priv->cmd.hcr_mutex);
1903 mutex_init(&priv->cmd.slave_cmd_mutex);
1904 sema_init(&priv->cmd.poll_sem, 1);
1905 priv->cmd.use_events = 0;
1906 priv->cmd.toggle = 1;
1907
1908 priv->cmd.hcr = NULL;
1909 priv->mfunc.vhcr = NULL;
1910
1911 if (!mlx4_is_slave(dev)) {
1912 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
1913 MLX4_HCR_BASE, MLX4_HCR_SIZE);
1914 if (!priv->cmd.hcr) {
1915 mlx4_err(dev, "Couldn't map command register.\n");
1916 return -ENOMEM;
1917 }
1918 }
1919
1920 if (mlx4_is_mfunc(dev)) {
1921 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
1922 &priv->mfunc.vhcr_dma,
1923 GFP_KERNEL);
1924 if (!priv->mfunc.vhcr)
1925 goto err_hcr;
1926 }
1927
1928 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
1929 MLX4_MAILBOX_SIZE,
1930 MLX4_MAILBOX_SIZE, 0);
1931 if (!priv->cmd.pool)
1932 goto err_vhcr;
1933
1934 return 0;
1935
1936 err_vhcr:
1937 if (mlx4_is_mfunc(dev))
1938 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1939 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
1940 priv->mfunc.vhcr = NULL;
1941
1942 err_hcr:
1943 if (!mlx4_is_slave(dev))
1944 iounmap(priv->cmd.hcr);
1945 return -ENOMEM;
1946 }
1947
1948 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
1949 {
1950 struct mlx4_priv *priv = mlx4_priv(dev);
1951 int i, port;
1952
1953 if (mlx4_is_master(dev)) {
1954 flush_workqueue(priv->mfunc.master.comm_wq);
1955 destroy_workqueue(priv->mfunc.master.comm_wq);
1956 for (i = 0; i < dev->num_slaves; i++) {
1957 for (port = 1; port <= MLX4_MAX_PORTS; port++)
1958 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1959 }
1960 kfree(priv->mfunc.master.slave_state);
1961 kfree(priv->mfunc.master.vf_admin);
1962 kfree(priv->mfunc.master.vf_oper);
1963 }
1964
1965 iounmap(priv->mfunc.comm);
1966 }
1967
1968 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
1969 {
1970 struct mlx4_priv *priv = mlx4_priv(dev);
1971
1972 pci_pool_destroy(priv->cmd.pool);
1973
1974 if (!mlx4_is_slave(dev))
1975 iounmap(priv->cmd.hcr);
1976 if (mlx4_is_mfunc(dev))
1977 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1978 priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
1979 priv->mfunc.vhcr = NULL;
1980 }
1981
1982 /*
1983 * Switch to using events to issue FW commands (can only be called
1984 * after event queue for command events has been initialized).
1985 */
1986 int mlx4_cmd_use_events(struct mlx4_dev *dev)
1987 {
1988 struct mlx4_priv *priv = mlx4_priv(dev);
1989 int i;
1990 int err = 0;
1991
1992 priv->cmd.context = kmalloc(priv->cmd.max_cmds *
1993 sizeof (struct mlx4_cmd_context),
1994 GFP_KERNEL);
1995 if (!priv->cmd.context)
1996 return -ENOMEM;
1997
1998 for (i = 0; i < priv->cmd.max_cmds; ++i) {
1999 priv->cmd.context[i].token = i;
2000 priv->cmd.context[i].next = i + 1;
2001 }
2002
2003 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2004 priv->cmd.free_head = 0;
2005
2006 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2007 spin_lock_init(&priv->cmd.context_lock);
2008
2009 for (priv->cmd.token_mask = 1;
2010 priv->cmd.token_mask < priv->cmd.max_cmds;
2011 priv->cmd.token_mask <<= 1)
2012 ; /* nothing */
2013 --priv->cmd.token_mask;
2014
2015 down(&priv->cmd.poll_sem);
2016 priv->cmd.use_events = 1;
2017
2018 return err;
2019 }
2020
2021 /*
2022 * Switch back to polling (used when shutting down the device)
2023 */
2024 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2025 {
2026 struct mlx4_priv *priv = mlx4_priv(dev);
2027 int i;
2028
2029 priv->cmd.use_events = 0;
2030
2031 for (i = 0; i < priv->cmd.max_cmds; ++i)
2032 down(&priv->cmd.event_sem);
2033
2034 kfree(priv->cmd.context);
2035
2036 up(&priv->cmd.poll_sem);
2037 }
2038
2039 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2040 {
2041 struct mlx4_cmd_mailbox *mailbox;
2042
2043 mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2044 if (!mailbox)
2045 return ERR_PTR(-ENOMEM);
2046
2047 mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2048 &mailbox->dma);
2049 if (!mailbox->buf) {
2050 kfree(mailbox);
2051 return ERR_PTR(-ENOMEM);
2052 }
2053
2054 return mailbox;
2055 }
2056 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2057
2058 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2059 struct mlx4_cmd_mailbox *mailbox)
2060 {
2061 if (!mailbox)
2062 return;
2063
2064 pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2065 kfree(mailbox);
2066 }
2067 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2068
2069 u32 mlx4_comm_get_version(void)
2070 {
2071 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2072 }
2073
2074 static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2075 {
2076 if ((vf < 0) || (vf >= dev->num_vfs)) {
2077 mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
2078 return -EINVAL;
2079 }
2080
2081 return vf+1;
2082 }
2083
2084 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2085 {
2086 struct mlx4_priv *priv = mlx4_priv(dev);
2087 struct mlx4_vport_state *s_info;
2088 int slave;
2089
2090 if (!mlx4_is_master(dev))
2091 return -EPROTONOSUPPORT;
2092
2093 slave = mlx4_get_slave_indx(dev, vf);
2094 if (slave < 0)
2095 return -EINVAL;
2096
2097 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2098 s_info->mac = mac;
2099 mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
2100 vf, port, s_info->mac);
2101 return 0;
2102 }
2103 EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2104
2105 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2106 {
2107 struct mlx4_priv *priv = mlx4_priv(dev);
2108 struct mlx4_vport_state *s_info;
2109 int slave;
2110
2111 if ((!mlx4_is_master(dev)) ||
2112 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2113 return -EPROTONOSUPPORT;
2114
2115 if ((vlan > 4095) || (qos > 7))
2116 return -EINVAL;
2117
2118 slave = mlx4_get_slave_indx(dev, vf);
2119 if (slave < 0)
2120 return -EINVAL;
2121
2122 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2123 if ((0 == vlan) && (0 == qos))
2124 s_info->default_vlan = MLX4_VGT;
2125 else
2126 s_info->default_vlan = vlan;
2127 s_info->default_qos = qos;
2128 return 0;
2129 }
2130 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2131
2132 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2133 {
2134 struct mlx4_priv *priv = mlx4_priv(dev);
2135 struct mlx4_vport_state *s_info;
2136 int slave;
2137
2138 if ((!mlx4_is_master(dev)) ||
2139 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
2140 return -EPROTONOSUPPORT;
2141
2142 slave = mlx4_get_slave_indx(dev, vf);
2143 if (slave < 0)
2144 return -EINVAL;
2145
2146 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2147 s_info->spoofchk = setting;
2148
2149 return 0;
2150 }
2151 EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
2152
2153 int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
2154 {
2155 struct mlx4_priv *priv = mlx4_priv(dev);
2156 struct mlx4_vport_state *s_info;
2157 int slave;
2158
2159 if (!mlx4_is_master(dev))
2160 return -EPROTONOSUPPORT;
2161
2162 slave = mlx4_get_slave_indx(dev, vf);
2163 if (slave < 0)
2164 return -EINVAL;
2165
2166 s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2167 ivf->vf = vf;
2168
2169 /* need to convert it to a func */
2170 ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
2171 ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
2172 ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
2173 ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
2174 ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
2175 ivf->mac[5] = ((s_info->mac) & 0xff);
2176
2177 ivf->vlan = s_info->default_vlan;
2178 ivf->qos = s_info->default_qos;
2179 ivf->tx_rate = s_info->tx_rate;
2180 ivf->spoofchk = s_info->spoofchk;
2181
2182 return 0;
2183 }
2184 EXPORT_SYMBOL_GPL(mlx4_get_vf_config);