drivers/net/ethernet/emulex/benet/be_cmds.c
1 /*
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@emulex.com
12 *
13 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
16 */
17
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21
22 static struct be_cmd_priv_map cmd_priv_map[] = {
23 {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
25 CMD_SUBSYSTEM_ETH,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
28 },
29 {
30 OPCODE_COMMON_GET_FLOW_CONTROL,
31 CMD_SUBSYSTEM_COMMON,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
34 },
35 {
36 OPCODE_COMMON_SET_FLOW_CONTROL,
37 CMD_SUBSYSTEM_COMMON,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
40 },
41 {
42 OPCODE_ETH_GET_PPORT_STATS,
43 CMD_SUBSYSTEM_ETH,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
46 },
47 {
48 OPCODE_COMMON_GET_PHY_DETAILS,
49 CMD_SUBSYSTEM_COMMON,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
52 }
53 };
54
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
56 u8 subsystem)
57 {
58 int i;
59 	int num_entries = ARRAY_SIZE(cmd_priv_map);
60 u32 cmd_privileges = adapter->cmd_privileges;
61
62 for (i = 0; i < num_entries; i++)
63 if (opcode == cmd_priv_map[i].opcode &&
64 subsystem == cmd_priv_map[i].subsystem)
65 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
66 return false;
67
68 return true;
69 }
70
71 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
72 {
73 return wrb->payload.embedded_payload;
74 }
75
76 static void be_mcc_notify(struct be_adapter *adapter)
77 {
78 struct be_queue_info *mccq = &adapter->mcc_obj.q;
79 u32 val = 0;
80
81 if (be_error(adapter))
82 return;
83
84 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
85 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
86
87 wmb();
88 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
89 }
90
91 /* To check if valid bit is set, check the entire word as we don't know
92 * the endianness of the data (old entry is host endian while a new entry is
93 * little endian) */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
95 {
96 u32 flags;
97
98 if (compl->flags != 0) {
99 flags = le32_to_cpu(compl->flags);
100 if (flags & CQE_FLAGS_VALID_MASK) {
101 compl->flags = flags;
102 return true;
103 }
104 }
105 return false;
106 }
107
108 /* Need to reset the entire word that houses the valid bit */
109 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
110 {
111 compl->flags = 0;
112 }
113
114 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
115 {
116 unsigned long addr;
117
118 addr = tag1;
119 addr = ((addr << 16) << 16) | tag0;
120 return (void *)addr;
121 }
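/* Illustrative note (not part of the original file): this reverses the
 * packing done in be_wrb_cmd_hdr_prepare(), which splits the request
 * header's virtual address across the two 32-bit WRB tags:
 *
 *	wrb->tag0 = req_addr & 0xFFFFFFFF;	 low 32 bits
 *	wrb->tag1 = upper_32_bits(req_addr);	 high 32 bits
 *
 * The double shift ((addr << 16) << 16), rather than (addr << 32), avoids
 * undefined behaviour when unsigned long is only 32 bits wide.
 */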
122
123 static int be_mcc_compl_process(struct be_adapter *adapter,
124 struct be_mcc_compl *compl)
125 {
126 u16 compl_status, extd_status;
127 struct be_cmd_resp_hdr *resp_hdr;
128 u8 opcode = 0, subsystem = 0;
129
130 /* Just swap the status to host endian; mcc tag is opaquely copied
131 * from mcc_wrb */
132 be_dws_le_to_cpu(compl, 4);
133
134 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
135 CQE_STATUS_COMPL_MASK;
136
137 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
138
139 if (resp_hdr) {
140 opcode = resp_hdr->opcode;
141 subsystem = resp_hdr->subsystem;
142 }
143
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl);
149 }
150
151 if (compl_status == MCC_STATUS_SUCCESS) {
152 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
153 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
154 (subsystem == CMD_SUBSYSTEM_ETH)) {
155 be_parse_stats(adapter);
156 adapter->stats_cmd_sent = false;
157 }
158 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
159 subsystem == CMD_SUBSYSTEM_COMMON) {
160 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
161 (void *)resp_hdr;
162 adapter->drv_stats.be_on_die_temperature =
163 resp->on_die_temperature;
164 }
165 } else {
166 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
167 adapter->be_get_temp_freq = 0;
168
169 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
170 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
171 goto done;
172
173 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
174 dev_warn(&adapter->pdev->dev,
175 "VF is not privileged to issue opcode %d-%d\n",
176 opcode, subsystem);
177 } else {
178 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
179 CQE_STATUS_EXTD_MASK;
180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status);
183 }
184 }
185 done:
186 return compl_status;
187 }
188
189 /* Link state evt is a string of bytes; no need for endian swapping */
190 static void be_async_link_state_process(struct be_adapter *adapter,
191 struct be_async_event_link_state *evt)
192 {
193 /* When link status changes, link speed must be re-queried from FW */
194 adapter->phy.link_speed = -1;
195
196 /* Ignore physical link event */
197 if (lancer_chip(adapter) &&
198 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
199 return;
200
201 /* For the initial link status do not rely on the ASYNC event as
202 * it may not be received in some cases.
203 */
204 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
205 be_link_status_update(adapter, evt->port_link_status);
206 }
207
208 /* Grp5 CoS Priority evt */
209 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
210 struct be_async_event_grp5_cos_priority *evt)
211 {
212 if (evt->valid) {
213 adapter->vlan_prio_bmap = evt->available_priority_bmap;
214 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
215 adapter->recommended_prio =
216 evt->reco_default_priority << VLAN_PRIO_SHIFT;
217 }
218 }
219
220 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
221 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
222 struct be_async_event_grp5_qos_link_speed *evt)
223 {
224 if (adapter->phy.link_speed >= 0 &&
225 evt->physical_port == adapter->port_num)
226 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
227 }
228
229 /* Grp5 PVID evt */
230 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
231 struct be_async_event_grp5_pvid_state *evt)
232 {
233 if (evt->enabled)
234 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
235 else
236 adapter->pvid = 0;
237 }
238
239 static void be_async_grp5_evt_process(struct be_adapter *adapter,
240 u32 trailer, struct be_mcc_compl *evt)
241 {
242 u8 event_type = 0;
243
244 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
245 ASYNC_TRAILER_EVENT_TYPE_MASK;
246
247 switch (event_type) {
248 case ASYNC_EVENT_COS_PRIORITY:
249 be_async_grp5_cos_priority_process(adapter,
250 (struct be_async_event_grp5_cos_priority *)evt);
251 break;
252 case ASYNC_EVENT_QOS_SPEED:
253 be_async_grp5_qos_speed_process(adapter,
254 (struct be_async_event_grp5_qos_link_speed *)evt);
255 break;
256 case ASYNC_EVENT_PVID_STATE:
257 be_async_grp5_pvid_state_process(adapter,
258 (struct be_async_event_grp5_pvid_state *)evt);
259 break;
260 default:
261 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
262 break;
263 }
264 }
265
266 static inline bool is_link_state_evt(u32 trailer)
267 {
268 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
269 ASYNC_TRAILER_EVENT_CODE_MASK) ==
270 ASYNC_EVENT_CODE_LINK_STATE;
271 }
272
273 static inline bool is_grp5_evt(u32 trailer)
274 {
275 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
276 ASYNC_TRAILER_EVENT_CODE_MASK) ==
277 ASYNC_EVENT_CODE_GRP_5);
278 }
279
280 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
281 {
282 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
283 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
284
285 if (be_mcc_compl_is_new(compl)) {
286 queue_tail_inc(mcc_cq);
287 return compl;
288 }
289 return NULL;
290 }
291
292 void be_async_mcc_enable(struct be_adapter *adapter)
293 {
294 spin_lock_bh(&adapter->mcc_cq_lock);
295
296 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
297 adapter->mcc_obj.rearm_cq = true;
298
299 spin_unlock_bh(&adapter->mcc_cq_lock);
300 }
301
302 void be_async_mcc_disable(struct be_adapter *adapter)
303 {
304 spin_lock_bh(&adapter->mcc_cq_lock);
305
306 adapter->mcc_obj.rearm_cq = false;
307 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
308
309 spin_unlock_bh(&adapter->mcc_cq_lock);
310 }
311
312 int be_process_mcc(struct be_adapter *adapter)
313 {
314 struct be_mcc_compl *compl;
315 int num = 0, status = 0;
316 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
317
318 spin_lock(&adapter->mcc_cq_lock);
319 while ((compl = be_mcc_compl_get(adapter))) {
320 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
321 /* Interpret flags as an async trailer */
322 if (is_link_state_evt(compl->flags))
323 be_async_link_state_process(adapter,
324 (struct be_async_event_link_state *) compl);
325 else if (is_grp5_evt(compl->flags))
326 be_async_grp5_evt_process(adapter,
327 compl->flags, compl);
328 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
329 status = be_mcc_compl_process(adapter, compl);
330 atomic_dec(&mcc_obj->q.used);
331 }
332 be_mcc_compl_use(compl);
333 num++;
334 }
335
336 if (num)
337 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
338
339 spin_unlock(&adapter->mcc_cq_lock);
340 return status;
341 }
342
343 /* Wait until no pending MCC requests remain */
344 static int be_mcc_wait_compl(struct be_adapter *adapter)
345 {
346 #define mcc_timeout 120000 /* 12s timeout */
347 int i, status = 0;
348 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
349
350 for (i = 0; i < mcc_timeout; i++) {
351 if (be_error(adapter))
352 return -EIO;
353
354 local_bh_disable();
355 status = be_process_mcc(adapter);
356 local_bh_enable();
357
358 if (atomic_read(&mcc_obj->q.used) == 0)
359 break;
360 udelay(100);
361 }
362 if (i == mcc_timeout) {
363 dev_err(&adapter->pdev->dev, "FW not responding\n");
364 adapter->fw_timeout = true;
365 return -EIO;
366 }
367 return status;
368 }
369
370 /* Notify MCC requests and wait for completion */
371 static int be_mcc_notify_wait(struct be_adapter *adapter)
372 {
373 int status;
374 struct be_mcc_wrb *wrb;
375 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
376 u16 index = mcc_obj->q.head;
377 struct be_cmd_resp_hdr *resp;
378
379 index_dec(&index, mcc_obj->q.len);
380 wrb = queue_index_node(&mcc_obj->q, index);
381
382 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
383
384 be_mcc_notify(adapter);
385
386 status = be_mcc_wait_compl(adapter);
387 if (status == -EIO)
388 goto out;
389
390 status = resp->status;
391 out:
392 return status;
393 }
394
395 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
396 {
397 int msecs = 0;
398 u32 ready;
399
400 do {
401 if (be_error(adapter))
402 return -EIO;
403
404 ready = ioread32(db);
405 if (ready == 0xffffffff)
406 return -1;
407
408 ready &= MPU_MAILBOX_DB_RDY_MASK;
409 if (ready)
410 break;
411
412 if (msecs > 4000) {
413 dev_err(&adapter->pdev->dev, "FW not responding\n");
414 adapter->fw_timeout = true;
415 be_detect_error(adapter);
416 return -1;
417 }
418
419 msleep(1);
420 msecs++;
421 } while (true);
422
423 return 0;
424 }
425
426 /*
427  * Insert the mailbox address into the doorbell in two steps, then poll
428  * the mbox doorbell till a command completion (or a timeout) occurs
429 */
430 static int be_mbox_notify_wait(struct be_adapter *adapter)
431 {
432 int status;
433 u32 val = 0;
434 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
435 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
436 struct be_mcc_mailbox *mbox = mbox_mem->va;
437 struct be_mcc_compl *compl = &mbox->compl;
438
439 /* wait for ready to be set */
440 status = be_mbox_db_ready_wait(adapter, db);
441 if (status != 0)
442 return status;
443
444 val |= MPU_MAILBOX_DB_HI_MASK;
445 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
446 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
447 iowrite32(val, db);
448
449 /* wait for ready to be set */
450 status = be_mbox_db_ready_wait(adapter, db);
451 if (status != 0)
452 return status;
453
454 val = 0;
455 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
456 val |= (u32)(mbox_mem->dma >> 4) << 2;
457 iowrite32(val, db);
458
459 status = be_mbox_db_ready_wait(adapter, db);
460 if (status != 0)
461 return status;
462
463 /* A cq entry has been made now */
464 if (be_mcc_compl_is_new(compl)) {
465 status = be_mcc_compl_process(adapter, &mbox->compl);
466 be_mcc_compl_use(compl);
467 if (status)
468 return status;
469 } else {
470 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
471 return -1;
472 }
473 return 0;
474 }
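/* Worked example (illustrative, not part of the original file): for a
 * 16-byte-aligned mailbox at dma = 0x0000001234567890:
 *
 *	hi write: val = MPU_MAILBOX_DB_HI_MASK | ((0x12 >> 2) << 2)
 *	              = MPU_MAILBOX_DB_HI_MASK | 0x10       (dma bits 34-63)
 *	lo write: val = ((u32)(dma >> 4)) << 2
 *	              = 0x23456789 << 2 = 0x8d159e24        (dma bits 4-33)
 *
 * dma bits 3:0 need not be sent: the mailbox's 16-byte alignment keeps
 * them zero.
 */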
475
476 static u16 be_POST_stage_get(struct be_adapter *adapter)
477 {
478 u32 sem;
479
480 if (BEx_chip(adapter))
481 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
482 else
483 pci_read_config_dword(adapter->pdev,
484 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
485
486 return sem & POST_STAGE_MASK;
487 }
488
489 int lancer_wait_ready(struct be_adapter *adapter)
490 {
491 #define SLIPORT_READY_TIMEOUT 30
492 u32 sliport_status;
493 int status = 0, i;
494
495 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
496 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
497 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
498 break;
499
500 msleep(1000);
501 }
502
503 if (i == SLIPORT_READY_TIMEOUT)
504 status = -1;
505
506 return status;
507 }
508
509 static bool lancer_provisioning_error(struct be_adapter *adapter)
510 {
511 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
512 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
513 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
514 sliport_err1 = ioread32(adapter->db +
515 SLIPORT_ERROR1_OFFSET);
516 sliport_err2 = ioread32(adapter->db +
517 SLIPORT_ERROR2_OFFSET);
518
519 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
520 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
521 return true;
522 }
523 return false;
524 }
525
526 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
527 {
528 int status;
529 u32 sliport_status, err, reset_needed;
530 bool resource_error;
531
532 resource_error = lancer_provisioning_error(adapter);
533 if (resource_error)
534 return -1;
535
536 status = lancer_wait_ready(adapter);
537 if (!status) {
538 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
539 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
540 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
541 if (err && reset_needed) {
542 iowrite32(SLI_PORT_CONTROL_IP_MASK,
543 adapter->db + SLIPORT_CONTROL_OFFSET);
544
545 /* check adapter has corrected the error */
546 status = lancer_wait_ready(adapter);
547 sliport_status = ioread32(adapter->db +
548 SLIPORT_STATUS_OFFSET);
549 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
550 SLIPORT_STATUS_RN_MASK);
551 if (status || sliport_status)
552 status = -1;
553 } else if (err || reset_needed) {
554 status = -1;
555 }
556 }
557 /* Stop error recovery if error is not recoverable.
558  * A no-resource error is temporary and will go away
559  * once the PF provisions resources.
560 */
561 resource_error = lancer_provisioning_error(adapter);
562 if (status == -1 && !resource_error)
563 adapter->eeh_error = true;
564
565 return status;
566 }
567
568 int be_fw_wait_ready(struct be_adapter *adapter)
569 {
570 u16 stage;
571 int status, timeout = 0;
572 struct device *dev = &adapter->pdev->dev;
573
574 if (lancer_chip(adapter)) {
575 status = lancer_wait_ready(adapter);
576 return status;
577 }
578
579 do {
580 stage = be_POST_stage_get(adapter);
581 if (stage == POST_STAGE_ARMFW_RDY)
582 return 0;
583
584 dev_info(dev, "Waiting for POST, %ds elapsed\n",
585 timeout);
586 if (msleep_interruptible(2000)) {
587 dev_err(dev, "Waiting for POST aborted\n");
588 return -EINTR;
589 }
590 timeout += 2;
591 } while (timeout < 60);
592
593 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
594 return -1;
595 }
596
597
598 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
599 {
600 return &wrb->payload.sgl[0];
601 }
602
603
604 /* Don't touch the hdr after it's prepared */
605 /* mem will be NULL for embedded commands */
606 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
607 u8 subsystem, u8 opcode, int cmd_len,
608 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
609 {
610 struct be_sge *sge;
611 unsigned long addr = (unsigned long)req_hdr;
612 u64 req_addr = addr;
613
614 req_hdr->opcode = opcode;
615 req_hdr->subsystem = subsystem;
616 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
617 req_hdr->version = 0;
618
619 wrb->tag0 = req_addr & 0xFFFFFFFF;
620 wrb->tag1 = upper_32_bits(req_addr);
621
622 wrb->payload_length = cmd_len;
623 if (mem) {
624 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
625 MCC_WRB_SGE_CNT_SHIFT;
626 sge = nonembedded_sgl(wrb);
627 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
628 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
629 sge->len = cpu_to_le32(mem->size);
630 } else
631 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
632 be_dws_cpu_to_le(wrb, 8);
633 }
634
635 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
636 struct be_dma_mem *mem)
637 {
638 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
639 u64 dma = (u64)mem->dma;
640
641 for (i = 0; i < buf_pages; i++) {
642 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
643 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
644 dma += PAGE_SIZE_4K;
645 }
646 }
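/* Illustrative example (not part of the original file): an 8K, 4K-aligned
 * ring at dma = 0x1000 produces two page entries:
 *
 *	pages[0] = { .lo = cpu_to_le32(0x1000), .hi = 0 };
 *	pages[1] = { .lo = cpu_to_le32(0x2000), .hi = 0 };
 *
 * PAGES_4K_SPANNED() also counts a partial leading page, so the same 8K
 * buffer at an unaligned va would span three entries.
 */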
647
648 /* Converts interrupt delay in microseconds to multiplier value */
649 static u32 eq_delay_to_mult(u32 usec_delay)
650 {
651 #define MAX_INTR_RATE 651042
652 const u32 round = 10;
653 u32 multiplier;
654
655 if (usec_delay == 0)
656 multiplier = 0;
657 else {
658 u32 interrupt_rate = 1000000 / usec_delay;
659 /* Max delay, corresponding to the lowest interrupt rate */
660 if (interrupt_rate == 0)
661 multiplier = 1023;
662 else {
663 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
664 multiplier /= interrupt_rate;
665 			/* Round the multiplier to the closest value. */
666 multiplier = (multiplier + round/2) / round;
667 multiplier = min(multiplier, (u32)1023);
668 }
669 }
670 return multiplier;
671 }
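/* Worked example (illustrative, not part of the original file):
 * for usec_delay = 96:
 *
 *	interrupt_rate = 1000000 / 96 = 10416
 *	multiplier     = (651042 - 10416) * 10 / 10416 = 615
 *	rounded        = (615 + 10/2) / 10 = 62
 *
 * so a 96us delay is programmed as an EQ delay multiplier of 62.
 */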
672
673 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
674 {
675 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
676 struct be_mcc_wrb *wrb
677 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
678 memset(wrb, 0, sizeof(*wrb));
679 return wrb;
680 }
681
682 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
683 {
684 struct be_queue_info *mccq = &adapter->mcc_obj.q;
685 struct be_mcc_wrb *wrb;
686
687 if (!mccq->created)
688 return NULL;
689
690 if (atomic_read(&mccq->used) >= mccq->len) {
691 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
692 return NULL;
693 }
694
695 wrb = queue_head_node(mccq);
696 queue_head_inc(mccq);
697 atomic_inc(&mccq->used);
698 memset(wrb, 0, sizeof(*wrb));
699 return wrb;
700 }
701
702 /* Tell fw we're about to start firing cmds by writing a
703 * special pattern across the wrb hdr; uses mbox
704 */
705 int be_cmd_fw_init(struct be_adapter *adapter)
706 {
707 u8 *wrb;
708 int status;
709
710 if (lancer_chip(adapter))
711 return 0;
712
713 if (mutex_lock_interruptible(&adapter->mbox_lock))
714 return -1;
715
716 wrb = (u8 *)wrb_from_mbox(adapter);
717 *wrb++ = 0xFF;
718 *wrb++ = 0x12;
719 *wrb++ = 0x34;
720 *wrb++ = 0xFF;
721 *wrb++ = 0xFF;
722 *wrb++ = 0x56;
723 *wrb++ = 0x78;
724 *wrb = 0xFF;
725
726 status = be_mbox_notify_wait(adapter);
727
728 mutex_unlock(&adapter->mbox_lock);
729 return status;
730 }
731
732 /* Tell fw we're done with firing cmds by writing a
733 * special pattern across the wrb hdr; uses mbox
734 */
735 int be_cmd_fw_clean(struct be_adapter *adapter)
736 {
737 u8 *wrb;
738 int status;
739
740 if (lancer_chip(adapter))
741 return 0;
742
743 if (mutex_lock_interruptible(&adapter->mbox_lock))
744 return -1;
745
746 wrb = (u8 *)wrb_from_mbox(adapter);
747 *wrb++ = 0xFF;
748 *wrb++ = 0xAA;
749 *wrb++ = 0xBB;
750 *wrb++ = 0xFF;
751 *wrb++ = 0xFF;
752 *wrb++ = 0xCC;
753 *wrb++ = 0xDD;
754 *wrb = 0xFF;
755
756 status = be_mbox_notify_wait(adapter);
757
758 mutex_unlock(&adapter->mbox_lock);
759 return status;
760 }
761
762 int be_cmd_eq_create(struct be_adapter *adapter,
763 struct be_queue_info *eq, int eq_delay)
764 {
765 struct be_mcc_wrb *wrb;
766 struct be_cmd_req_eq_create *req;
767 struct be_dma_mem *q_mem = &eq->dma_mem;
768 int status;
769
770 if (mutex_lock_interruptible(&adapter->mbox_lock))
771 return -1;
772
773 wrb = wrb_from_mbox(adapter);
774 req = embedded_payload(wrb);
775
776 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
777 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
778
779 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
780
781 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
782 	/* 4-byte EQE */
783 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
784 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
785 __ilog2_u32(eq->len/256));
786 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
787 eq_delay_to_mult(eq_delay));
788 be_dws_cpu_to_le(req->context, sizeof(req->context));
789
790 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
791
792 status = be_mbox_notify_wait(adapter);
793 if (!status) {
794 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
795 eq->id = le16_to_cpu(resp->eq_id);
796 eq->created = true;
797 }
798
799 mutex_unlock(&adapter->mbox_lock);
800 return status;
801 }
802
803 /* Use MCC */
804 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
805 bool permanent, u32 if_handle, u32 pmac_id)
806 {
807 struct be_mcc_wrb *wrb;
808 struct be_cmd_req_mac_query *req;
809 int status;
810
811 spin_lock_bh(&adapter->mcc_lock);
812
813 wrb = wrb_from_mccq(adapter);
814 if (!wrb) {
815 status = -EBUSY;
816 goto err;
817 }
818 req = embedded_payload(wrb);
819
820 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
821 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
822 req->type = MAC_ADDRESS_TYPE_NETWORK;
823 if (permanent) {
824 req->permanent = 1;
825 } else {
826 req->if_id = cpu_to_le16((u16) if_handle);
827 req->pmac_id = cpu_to_le32(pmac_id);
828 req->permanent = 0;
829 }
830
831 status = be_mcc_notify_wait(adapter);
832 if (!status) {
833 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
834 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
835 }
836
837 err:
838 spin_unlock_bh(&adapter->mcc_lock);
839 return status;
840 }
841
842 /* Uses synchronous MCCQ */
843 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
844 u32 if_id, u32 *pmac_id, u32 domain)
845 {
846 struct be_mcc_wrb *wrb;
847 struct be_cmd_req_pmac_add *req;
848 int status;
849
850 spin_lock_bh(&adapter->mcc_lock);
851
852 wrb = wrb_from_mccq(adapter);
853 if (!wrb) {
854 status = -EBUSY;
855 goto err;
856 }
857 req = embedded_payload(wrb);
858
859 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
860 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
861
862 req->hdr.domain = domain;
863 req->if_id = cpu_to_le32(if_id);
864 memcpy(req->mac_address, mac_addr, ETH_ALEN);
865
866 status = be_mcc_notify_wait(adapter);
867 if (!status) {
868 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
869 *pmac_id = le32_to_cpu(resp->pmac_id);
870 }
871
872 err:
873 spin_unlock_bh(&adapter->mcc_lock);
874
875 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
876 status = -EPERM;
877
878 return status;
879 }
880
881 /* Uses synchronous MCCQ */
882 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
883 {
884 struct be_mcc_wrb *wrb;
885 struct be_cmd_req_pmac_del *req;
886 int status;
887
888 if (pmac_id == -1)
889 return 0;
890
891 spin_lock_bh(&adapter->mcc_lock);
892
893 wrb = wrb_from_mccq(adapter);
894 if (!wrb) {
895 status = -EBUSY;
896 goto err;
897 }
898 req = embedded_payload(wrb);
899
900 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
901 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
902
903 req->hdr.domain = dom;
904 req->if_id = cpu_to_le32(if_id);
905 req->pmac_id = cpu_to_le32(pmac_id);
906
907 status = be_mcc_notify_wait(adapter);
908
909 err:
910 spin_unlock_bh(&adapter->mcc_lock);
911 return status;
912 }
913
914 /* Uses Mbox */
915 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
916 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
917 {
918 struct be_mcc_wrb *wrb;
919 struct be_cmd_req_cq_create *req;
920 struct be_dma_mem *q_mem = &cq->dma_mem;
921 void *ctxt;
922 int status;
923
924 if (mutex_lock_interruptible(&adapter->mbox_lock))
925 return -1;
926
927 wrb = wrb_from_mbox(adapter);
928 req = embedded_payload(wrb);
929 ctxt = &req->context;
930
931 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
932 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
933
934 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
935 if (lancer_chip(adapter)) {
936 req->hdr.version = 2;
937 req->page_size = 1; /* 1 for 4K */
938 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
939 no_delay);
940 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
941 __ilog2_u32(cq->len/256));
942 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
943 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
944 ctxt, 1);
945 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
946 ctxt, eq->id);
947 } else {
948 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
949 coalesce_wm);
950 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
951 ctxt, no_delay);
952 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
953 __ilog2_u32(cq->len/256));
954 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
955 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
956 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
957 }
958
959 be_dws_cpu_to_le(ctxt, sizeof(req->context));
960
961 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
962
963 status = be_mbox_notify_wait(adapter);
964 if (!status) {
965 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
966 cq->id = le16_to_cpu(resp->cq_id);
967 cq->created = true;
968 }
969
970 mutex_unlock(&adapter->mbox_lock);
971
972 return status;
973 }
974
975 static u32 be_encoded_q_len(int q_len)
976 {
977 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
978 if (len_encoded == 16)
979 len_encoded = 0;
980 return len_encoded;
981 }
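/* Illustrative encodings (not part of the original file):
 *
 *	q_len =   256 -> fls = 9  -> encoding 9
 *	q_len =  1024 -> fls = 11 -> encoding 11
 *	q_len = 32768 -> fls = 16 -> encoding 0  (16 wraps to 0)
 */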
982
983 int be_cmd_mccq_ext_create(struct be_adapter *adapter,
984 struct be_queue_info *mccq,
985 struct be_queue_info *cq)
986 {
987 struct be_mcc_wrb *wrb;
988 struct be_cmd_req_mcc_ext_create *req;
989 struct be_dma_mem *q_mem = &mccq->dma_mem;
990 void *ctxt;
991 int status;
992
993 if (mutex_lock_interruptible(&adapter->mbox_lock))
994 return -1;
995
996 wrb = wrb_from_mbox(adapter);
997 req = embedded_payload(wrb);
998 ctxt = &req->context;
999
1000 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1001 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
1002
1003 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1004 if (lancer_chip(adapter)) {
1005 req->hdr.version = 1;
1006 req->cq_id = cpu_to_le16(cq->id);
1007
1008 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
1009 be_encoded_q_len(mccq->len));
1010 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
1011 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
1012 ctxt, cq->id);
1013 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
1014 ctxt, 1);
1015
1016 } else {
1017 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1018 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1019 be_encoded_q_len(mccq->len));
1020 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1021 }
1022
1023 	/* Subscribe to Link State and Group 5 events (bits 1 and 5 set) */
1024 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1025 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1026
1027 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1028
1029 status = be_mbox_notify_wait(adapter);
1030 if (!status) {
1031 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1032 mccq->id = le16_to_cpu(resp->id);
1033 mccq->created = true;
1034 }
1035 mutex_unlock(&adapter->mbox_lock);
1036
1037 return status;
1038 }
1039
1040 int be_cmd_mccq_org_create(struct be_adapter *adapter,
1041 struct be_queue_info *mccq,
1042 struct be_queue_info *cq)
1043 {
1044 struct be_mcc_wrb *wrb;
1045 struct be_cmd_req_mcc_create *req;
1046 struct be_dma_mem *q_mem = &mccq->dma_mem;
1047 void *ctxt;
1048 int status;
1049
1050 if (mutex_lock_interruptible(&adapter->mbox_lock))
1051 return -1;
1052
1053 wrb = wrb_from_mbox(adapter);
1054 req = embedded_payload(wrb);
1055 ctxt = &req->context;
1056
1057 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1058 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
1059
1060 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1061
1062 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1063 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1064 be_encoded_q_len(mccq->len));
1065 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1066
1067 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1068
1069 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1070
1071 status = be_mbox_notify_wait(adapter);
1072 if (!status) {
1073 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1074 mccq->id = le16_to_cpu(resp->id);
1075 mccq->created = true;
1076 }
1077
1078 mutex_unlock(&adapter->mbox_lock);
1079 return status;
1080 }
1081
1082 int be_cmd_mccq_create(struct be_adapter *adapter,
1083 struct be_queue_info *mccq,
1084 struct be_queue_info *cq)
1085 {
1086 int status;
1087
1088 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1089 if (status && !lancer_chip(adapter)) {
1090 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1091 "or newer to avoid conflicting priorities between NIC "
1092 "and FCoE traffic");
1093 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1094 }
1095 return status;
1096 }
1097
1098 int be_cmd_txq_create(struct be_adapter *adapter,
1099 struct be_queue_info *txq,
1100 struct be_queue_info *cq)
1101 {
1102 struct be_mcc_wrb *wrb;
1103 struct be_cmd_req_eth_tx_create *req;
1104 struct be_dma_mem *q_mem = &txq->dma_mem;
1105 void *ctxt;
1106 int status;
1107
1108 spin_lock_bh(&adapter->mcc_lock);
1109
1110 wrb = wrb_from_mccq(adapter);
1111 if (!wrb) {
1112 status = -EBUSY;
1113 goto err;
1114 }
1115
1116 req = embedded_payload(wrb);
1117 ctxt = &req->context;
1118
1119 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1120 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
1121
1122 if (lancer_chip(adapter)) {
1123 req->hdr.version = 1;
1124 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
1125 adapter->if_handle);
1126 }
1127
1128 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1129 req->ulp_num = BE_ULP1_NUM;
1130 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1131
1132 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
1133 be_encoded_q_len(txq->len));
1134 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
1135 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
1136
1137 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1138
1139 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1140
1141 status = be_mcc_notify_wait(adapter);
1142 if (!status) {
1143 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
1144 txq->id = le16_to_cpu(resp->cid);
1145 txq->created = true;
1146 }
1147
1148 err:
1149 spin_unlock_bh(&adapter->mcc_lock);
1150
1151 return status;
1152 }
1153
1154 /* Uses MCC */
1155 int be_cmd_rxq_create(struct be_adapter *adapter,
1156 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1157 u32 if_id, u32 rss, u8 *rss_id)
1158 {
1159 struct be_mcc_wrb *wrb;
1160 struct be_cmd_req_eth_rx_create *req;
1161 struct be_dma_mem *q_mem = &rxq->dma_mem;
1162 int status;
1163
1164 spin_lock_bh(&adapter->mcc_lock);
1165
1166 wrb = wrb_from_mccq(adapter);
1167 if (!wrb) {
1168 status = -EBUSY;
1169 goto err;
1170 }
1171 req = embedded_payload(wrb);
1172
1173 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1174 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1175
1176 req->cq_id = cpu_to_le16(cq_id);
1177 req->frag_size = fls(frag_size) - 1;
1178 req->num_pages = 2;
1179 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1180 req->interface_id = cpu_to_le32(if_id);
1181 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1182 req->rss_queue = cpu_to_le32(rss);
1183
1184 status = be_mcc_notify_wait(adapter);
1185 if (!status) {
1186 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1187 rxq->id = le16_to_cpu(resp->id);
1188 rxq->created = true;
1189 *rss_id = resp->rss_id;
1190 }
1191
1192 err:
1193 spin_unlock_bh(&adapter->mcc_lock);
1194 return status;
1195 }
1196
1197 /* Generic destroyer function for all types of queues
1198 * Uses Mbox
1199 */
1200 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1201 int queue_type)
1202 {
1203 struct be_mcc_wrb *wrb;
1204 struct be_cmd_req_q_destroy *req;
1205 u8 subsys = 0, opcode = 0;
1206 int status;
1207
1208 if (mutex_lock_interruptible(&adapter->mbox_lock))
1209 return -1;
1210
1211 wrb = wrb_from_mbox(adapter);
1212 req = embedded_payload(wrb);
1213
1214 switch (queue_type) {
1215 case QTYPE_EQ:
1216 subsys = CMD_SUBSYSTEM_COMMON;
1217 opcode = OPCODE_COMMON_EQ_DESTROY;
1218 break;
1219 case QTYPE_CQ:
1220 subsys = CMD_SUBSYSTEM_COMMON;
1221 opcode = OPCODE_COMMON_CQ_DESTROY;
1222 break;
1223 case QTYPE_TXQ:
1224 subsys = CMD_SUBSYSTEM_ETH;
1225 opcode = OPCODE_ETH_TX_DESTROY;
1226 break;
1227 case QTYPE_RXQ:
1228 subsys = CMD_SUBSYSTEM_ETH;
1229 opcode = OPCODE_ETH_RX_DESTROY;
1230 break;
1231 case QTYPE_MCCQ:
1232 subsys = CMD_SUBSYSTEM_COMMON;
1233 opcode = OPCODE_COMMON_MCC_DESTROY;
1234 break;
1235 default:
1236 BUG();
1237 }
1238
1239 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1240 NULL);
1241 req->id = cpu_to_le16(q->id);
1242
1243 status = be_mbox_notify_wait(adapter);
1244 q->created = false;
1245
1246 mutex_unlock(&adapter->mbox_lock);
1247 return status;
1248 }
1249
1250 /* Uses MCC */
1251 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1252 {
1253 struct be_mcc_wrb *wrb;
1254 struct be_cmd_req_q_destroy *req;
1255 int status;
1256
1257 spin_lock_bh(&adapter->mcc_lock);
1258
1259 wrb = wrb_from_mccq(adapter);
1260 if (!wrb) {
1261 status = -EBUSY;
1262 goto err;
1263 }
1264 req = embedded_payload(wrb);
1265
1266 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1267 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1268 req->id = cpu_to_le16(q->id);
1269
1270 status = be_mcc_notify_wait(adapter);
1271 q->created = false;
1272
1273 err:
1274 spin_unlock_bh(&adapter->mcc_lock);
1275 return status;
1276 }
1277
1278 /* Create an rx filtering policy configuration on an i/f
1279 * Uses MCCQ
1280 */
1281 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1282 u32 *if_handle, u32 domain)
1283 {
1284 struct be_mcc_wrb *wrb;
1285 struct be_cmd_req_if_create *req;
1286 int status;
1287
1288 spin_lock_bh(&adapter->mcc_lock);
1289
1290 wrb = wrb_from_mccq(adapter);
1291 if (!wrb) {
1292 status = -EBUSY;
1293 goto err;
1294 }
1295 req = embedded_payload(wrb);
1296
1297 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1298 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1299 req->hdr.domain = domain;
1300 req->capability_flags = cpu_to_le32(cap_flags);
1301 req->enable_flags = cpu_to_le32(en_flags);
1302
1303 req->pmac_invalid = true;
1304
1305 status = be_mcc_notify_wait(adapter);
1306 if (!status) {
1307 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1308 *if_handle = le32_to_cpu(resp->interface_id);
1309 }
1310
1311 err:
1312 spin_unlock_bh(&adapter->mcc_lock);
1313 return status;
1314 }
1315
1316 /* Uses MCCQ */
1317 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1318 {
1319 struct be_mcc_wrb *wrb;
1320 struct be_cmd_req_if_destroy *req;
1321 int status;
1322
1323 if (interface_id == -1)
1324 return 0;
1325
1326 spin_lock_bh(&adapter->mcc_lock);
1327
1328 wrb = wrb_from_mccq(adapter);
1329 if (!wrb) {
1330 status = -EBUSY;
1331 goto err;
1332 }
1333 req = embedded_payload(wrb);
1334
1335 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1336 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1337 req->hdr.domain = domain;
1338 req->interface_id = cpu_to_le32(interface_id);
1339
1340 status = be_mcc_notify_wait(adapter);
1341 err:
1342 spin_unlock_bh(&adapter->mcc_lock);
1343 return status;
1344 }
1345
1346 /* Get stats is a non-embedded command: the request is not embedded inside
1347 * WRB but is a separate dma memory block
1348 * Uses asynchronous MCC
1349 */
1350 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1351 {
1352 struct be_mcc_wrb *wrb;
1353 struct be_cmd_req_hdr *hdr;
1354 int status = 0;
1355
1356 spin_lock_bh(&adapter->mcc_lock);
1357
1358 wrb = wrb_from_mccq(adapter);
1359 if (!wrb) {
1360 status = -EBUSY;
1361 goto err;
1362 }
1363 hdr = nonemb_cmd->va;
1364
1365 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1366 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1367
1368 	/* version 1 of the cmd is supported on all chips except BE2 */
1369 if (!BE2_chip(adapter))
1370 hdr->version = 1;
1371
1372 be_mcc_notify(adapter);
1373 adapter->stats_cmd_sent = true;
1374
1375 err:
1376 spin_unlock_bh(&adapter->mcc_lock);
1377 return status;
1378 }
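/* Hedged usage sketch (assumed caller; the real allocation lives in
 * be_main.c and the struct name below is illustrative):
 *
 *	struct be_dma_mem cmd;
 *
 *	cmd.size = sizeof(struct be_cmd_req_get_stats_v1);
 *	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
 *	if (cmd.va) {
 *		memset(cmd.va, 0, cmd.size);
 *		be_cmd_get_stats(adapter, &cmd);    the response is DMAed
 *						    back into the same block
 *	}
 */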
1379
1380 /* Lancer Stats */
1381 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1382 struct be_dma_mem *nonemb_cmd)
1383 {
1384
1385 struct be_mcc_wrb *wrb;
1386 struct lancer_cmd_req_pport_stats *req;
1387 int status = 0;
1388
1389 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1390 CMD_SUBSYSTEM_ETH))
1391 return -EPERM;
1392
1393 spin_lock_bh(&adapter->mcc_lock);
1394
1395 wrb = wrb_from_mccq(adapter);
1396 if (!wrb) {
1397 status = -EBUSY;
1398 goto err;
1399 }
1400 req = nonemb_cmd->va;
1401
1402 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1403 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1404 nonemb_cmd);
1405
1406 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1407 req->cmd_params.params.reset_stats = 0;
1408
1409 be_mcc_notify(adapter);
1410 adapter->stats_cmd_sent = true;
1411
1412 err:
1413 spin_unlock_bh(&adapter->mcc_lock);
1414 return status;
1415 }
1416
1417 static int be_mac_to_link_speed(int mac_speed)
1418 {
1419 switch (mac_speed) {
1420 case PHY_LINK_SPEED_ZERO:
1421 return 0;
1422 case PHY_LINK_SPEED_10MBPS:
1423 return 10;
1424 case PHY_LINK_SPEED_100MBPS:
1425 return 100;
1426 case PHY_LINK_SPEED_1GBPS:
1427 return 1000;
1428 case PHY_LINK_SPEED_10GBPS:
1429 return 10000;
1430 }
1431 return 0;
1432 }
1433
1434 /* Uses synchronous mcc
1435 * Returns link_speed in Mbps
1436 */
1437 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1438 u8 *link_status, u32 dom)
1439 {
1440 struct be_mcc_wrb *wrb;
1441 struct be_cmd_req_link_status *req;
1442 int status;
1443
1444 spin_lock_bh(&adapter->mcc_lock);
1445
1446 if (link_status)
1447 *link_status = LINK_DOWN;
1448
1449 wrb = wrb_from_mccq(adapter);
1450 if (!wrb) {
1451 status = -EBUSY;
1452 goto err;
1453 }
1454 req = embedded_payload(wrb);
1455
1456 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1457 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1458
1459 	/* version 1 of the cmd is supported on all chips except BE2 */
1460 if (!BE2_chip(adapter))
1461 req->hdr.version = 1;
1462
1463 req->hdr.domain = dom;
1464
1465 status = be_mcc_notify_wait(adapter);
1466 if (!status) {
1467 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1468 if (link_speed) {
1469 *link_speed = resp->link_speed ?
1470 le16_to_cpu(resp->link_speed) * 10 :
1471 be_mac_to_link_speed(resp->mac_speed);
1472
1473 if (!resp->logical_link_status)
1474 *link_speed = 0;
1475 }
1476 if (link_status)
1477 *link_status = resp->logical_link_status;
1478 }
1479
1480 err:
1481 spin_unlock_bh(&adapter->mcc_lock);
1482 return status;
1483 }
1484
1485 /* Uses asynchronous mcc */
1486 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1487 {
1488 struct be_mcc_wrb *wrb;
1489 struct be_cmd_req_get_cntl_addnl_attribs *req;
1490 	int status = 0;
1491
1492 spin_lock_bh(&adapter->mcc_lock);
1493
1494 wrb = wrb_from_mccq(adapter);
1495 if (!wrb) {
1496 status = -EBUSY;
1497 goto err;
1498 }
1499 req = embedded_payload(wrb);
1500
1501 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1502 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1503 wrb, NULL);
1504
1505 be_mcc_notify(adapter);
1506
1507 err:
1508 spin_unlock_bh(&adapter->mcc_lock);
1509 return status;
1510 }
1511
1512 /* Uses synchronous mcc */
1513 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1514 {
1515 struct be_mcc_wrb *wrb;
1516 struct be_cmd_req_get_fat *req;
1517 int status;
1518
1519 spin_lock_bh(&adapter->mcc_lock);
1520
1521 wrb = wrb_from_mccq(adapter);
1522 if (!wrb) {
1523 status = -EBUSY;
1524 goto err;
1525 }
1526 req = embedded_payload(wrb);
1527
1528 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1529 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1530 req->fat_operation = cpu_to_le32(QUERY_FAT);
1531 status = be_mcc_notify_wait(adapter);
1532 if (!status) {
1533 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1534 if (log_size && resp->log_size)
1535 *log_size = le32_to_cpu(resp->log_size) -
1536 sizeof(u32);
1537 }
1538 err:
1539 spin_unlock_bh(&adapter->mcc_lock);
1540 return status;
1541 }
1542
1543 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1544 {
1545 struct be_dma_mem get_fat_cmd;
1546 struct be_mcc_wrb *wrb;
1547 struct be_cmd_req_get_fat *req;
1548 u32 offset = 0, total_size, buf_size,
1549 log_offset = sizeof(u32), payload_len;
1550 int status;
1551
1552 if (buf_len == 0)
1553 return;
1554
1555 total_size = buf_len;
1556
1557 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1558 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1559 get_fat_cmd.size,
1560 &get_fat_cmd.dma);
1561 if (!get_fat_cmd.va) {
1562 status = -ENOMEM;
1563 dev_err(&adapter->pdev->dev,
1564 "Memory allocation failure while retrieving FAT data\n");
1565 return;
1566 }
1567
1568 spin_lock_bh(&adapter->mcc_lock);
1569
1570 while (total_size) {
1571 buf_size = min(total_size, (u32)60*1024);
1572 total_size -= buf_size;
1573
1574 wrb = wrb_from_mccq(adapter);
1575 if (!wrb) {
1576 status = -EBUSY;
1577 goto err;
1578 }
1579 req = get_fat_cmd.va;
1580
1581 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1582 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1583 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1584 &get_fat_cmd);
1585
1586 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1587 req->read_log_offset = cpu_to_le32(log_offset);
1588 req->read_log_length = cpu_to_le32(buf_size);
1589 req->data_buffer_size = cpu_to_le32(buf_size);
1590
1591 status = be_mcc_notify_wait(adapter);
1592 if (!status) {
1593 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1594 memcpy(buf + offset,
1595 resp->data_buffer,
1596 le32_to_cpu(resp->read_log_length));
1597 } else {
1598 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1599 goto err;
1600 }
1601 offset += buf_size;
1602 log_offset += buf_size;
1603 }
1604 err:
1605 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1606 get_fat_cmd.va,
1607 get_fat_cmd.dma);
1608 spin_unlock_bh(&adapter->mcc_lock);
1609 }
1610
1611 /* Uses synchronous mcc */
1612 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1613 char *fw_on_flash)
1614 {
1615 struct be_mcc_wrb *wrb;
1616 struct be_cmd_req_get_fw_version *req;
1617 int status;
1618
1619 spin_lock_bh(&adapter->mcc_lock);
1620
1621 wrb = wrb_from_mccq(adapter);
1622 if (!wrb) {
1623 status = -EBUSY;
1624 goto err;
1625 }
1626
1627 req = embedded_payload(wrb);
1628
1629 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1630 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1631 status = be_mcc_notify_wait(adapter);
1632 if (!status) {
1633 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1634 strcpy(fw_ver, resp->firmware_version_string);
1635 if (fw_on_flash)
1636 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1637 }
1638 err:
1639 spin_unlock_bh(&adapter->mcc_lock);
1640 return status;
1641 }
1642
1643 /* Set the EQ delay interval of an EQ to the specified value
1644 * Uses async mcc
1645 */
1646 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1647 {
1648 struct be_mcc_wrb *wrb;
1649 struct be_cmd_req_modify_eq_delay *req;
1650 int status = 0;
1651
1652 spin_lock_bh(&adapter->mcc_lock);
1653
1654 wrb = wrb_from_mccq(adapter);
1655 if (!wrb) {
1656 status = -EBUSY;
1657 goto err;
1658 }
1659 req = embedded_payload(wrb);
1660
1661 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1662 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1663
1664 req->num_eq = cpu_to_le32(1);
1665 req->delay[0].eq_id = cpu_to_le32(eq_id);
1666 req->delay[0].phase = 0;
1667 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1668
1669 be_mcc_notify(adapter);
1670
1671 err:
1672 spin_unlock_bh(&adapter->mcc_lock);
1673 return status;
1674 }
1675
1676 /* Uses synchronous mcc */
1677 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1678 u32 num, bool untagged, bool promiscuous)
1679 {
1680 struct be_mcc_wrb *wrb;
1681 struct be_cmd_req_vlan_config *req;
1682 int status;
1683
1684 spin_lock_bh(&adapter->mcc_lock);
1685
1686 wrb = wrb_from_mccq(adapter);
1687 if (!wrb) {
1688 status = -EBUSY;
1689 goto err;
1690 }
1691 req = embedded_payload(wrb);
1692
1693 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1694 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1695
1696 req->interface_id = if_id;
1697 req->promiscuous = promiscuous;
1698 req->untagged = untagged;
1699 req->num_vlan = num;
1700 if (!promiscuous) {
1701 memcpy(req->normal_vlan, vtag_array,
1702 req->num_vlan * sizeof(vtag_array[0]));
1703 }
1704
1705 status = be_mcc_notify_wait(adapter);
1706
1707 err:
1708 spin_unlock_bh(&adapter->mcc_lock);
1709 return status;
1710 }
1711
1712 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1713 {
1714 struct be_mcc_wrb *wrb;
1715 struct be_dma_mem *mem = &adapter->rx_filter;
1716 struct be_cmd_req_rx_filter *req = mem->va;
1717 int status;
1718
1719 spin_lock_bh(&adapter->mcc_lock);
1720
1721 wrb = wrb_from_mccq(adapter);
1722 if (!wrb) {
1723 status = -EBUSY;
1724 goto err;
1725 }
1726 memset(req, 0, sizeof(*req));
1727 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1728 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1729 wrb, mem);
1730
1731 req->if_id = cpu_to_le32(adapter->if_handle);
1732 if (flags & IFF_PROMISC) {
1733 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1734 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1735 if (value == ON)
1736 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1737 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1738 } else if (flags & IFF_ALLMULTI) {
1739 req->if_flags_mask = req->if_flags =
1740 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1741 } else {
1742 struct netdev_hw_addr *ha;
1743 int i = 0;
1744
1745 req->if_flags_mask = req->if_flags =
1746 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1747
1748 		/* Reset mcast promisc mode if it is already set, by setting
1749 		 * the mask bit while leaving the flags field bit clear
1750 */
1751 req->if_flags_mask |=
1752 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1753 adapter->if_cap_flags);
1754
1755 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1756 netdev_for_each_mc_addr(ha, adapter->netdev)
1757 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1758 }
1759
1760 status = be_mcc_notify_wait(adapter);
1761 err:
1762 spin_unlock_bh(&adapter->mcc_lock);
1763 return status;
1764 }
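/* Illustrative calls (assumed usage, mirroring be_set_rx_mode() in
 * be_main.c):
 *
 *	be_cmd_rx_filter(adapter, IFF_PROMISC, ON);   enter promiscuous mode
 *	be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);  restore normal filtering
 */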
1765
1766 /* Uses synchronous mcc */
1767 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1768 {
1769 struct be_mcc_wrb *wrb;
1770 struct be_cmd_req_set_flow_control *req;
1771 int status;
1772
1773 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1774 CMD_SUBSYSTEM_COMMON))
1775 return -EPERM;
1776
1777 spin_lock_bh(&adapter->mcc_lock);
1778
1779 wrb = wrb_from_mccq(adapter);
1780 if (!wrb) {
1781 status = -EBUSY;
1782 goto err;
1783 }
1784 req = embedded_payload(wrb);
1785
1786 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1787 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1788
1789 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1790 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1791
1792 status = be_mcc_notify_wait(adapter);
1793
1794 err:
1795 spin_unlock_bh(&adapter->mcc_lock);
1796 return status;
1797 }
1798
1799 /* Uses sync mcc */
1800 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1801 {
1802 struct be_mcc_wrb *wrb;
1803 struct be_cmd_req_get_flow_control *req;
1804 int status;
1805
1806 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1807 CMD_SUBSYSTEM_COMMON))
1808 return -EPERM;
1809
1810 spin_lock_bh(&adapter->mcc_lock);
1811
1812 wrb = wrb_from_mccq(adapter);
1813 if (!wrb) {
1814 status = -EBUSY;
1815 goto err;
1816 }
1817 req = embedded_payload(wrb);
1818
1819 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1820 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1821
1822 status = be_mcc_notify_wait(adapter);
1823 if (!status) {
1824 struct be_cmd_resp_get_flow_control *resp =
1825 embedded_payload(wrb);
1826 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1827 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1828 }
1829
1830 err:
1831 spin_unlock_bh(&adapter->mcc_lock);
1832 return status;
1833 }
1834
1835 /* Uses mbox */
1836 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1837 u32 *mode, u32 *caps)
1838 {
1839 struct be_mcc_wrb *wrb;
1840 struct be_cmd_req_query_fw_cfg *req;
1841 int status;
1842
1843 if (mutex_lock_interruptible(&adapter->mbox_lock))
1844 return -1;
1845
1846 wrb = wrb_from_mbox(adapter);
1847 req = embedded_payload(wrb);
1848
1849 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1850 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1851
1852 status = be_mbox_notify_wait(adapter);
1853 if (!status) {
1854 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1855 *port_num = le32_to_cpu(resp->phys_port);
1856 *mode = le32_to_cpu(resp->function_mode);
1857 *caps = le32_to_cpu(resp->function_caps);
1858 }
1859
1860 mutex_unlock(&adapter->mbox_lock);
1861 return status;
1862 }
1863
1864 /* Uses mbox */
1865 int be_cmd_reset_function(struct be_adapter *adapter)
1866 {
1867 struct be_mcc_wrb *wrb;
1868 struct be_cmd_req_hdr *req;
1869 int status;
1870
1871 if (lancer_chip(adapter)) {
1872 status = lancer_wait_ready(adapter);
1873 if (!status) {
1874 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1875 adapter->db + SLIPORT_CONTROL_OFFSET);
1876 status = lancer_test_and_set_rdy_state(adapter);
1877 }
1878 if (status) {
1879 dev_err(&adapter->pdev->dev,
1880 "Adapter in non recoverable error\n");
1881 }
1882 return status;
1883 }
1884
1885 if (mutex_lock_interruptible(&adapter->mbox_lock))
1886 return -1;
1887
1888 wrb = wrb_from_mbox(adapter);
1889 req = embedded_payload(wrb);
1890
1891 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1892 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1893
1894 status = be_mbox_notify_wait(adapter);
1895
1896 mutex_unlock(&adapter->mbox_lock);
1897 return status;
1898 }
1899
1900 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1901 {
1902 struct be_mcc_wrb *wrb;
1903 struct be_cmd_req_rss_config *req;
1904 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1905 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1906 0x3ea83c02, 0x4a110304};
1907 int status;
1908
1909 if (mutex_lock_interruptible(&adapter->mbox_lock))
1910 return -1;
1911
1912 wrb = wrb_from_mbox(adapter);
1913 req = embedded_payload(wrb);
1914
1915 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1916 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1917
1918 req->if_id = cpu_to_le32(adapter->if_handle);
1919 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
1920 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
1921
1922 if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
1923 req->hdr.version = 1;
1924 req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
1925 RSS_ENABLE_UDP_IPV6);
1926 }
1927
1928 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1929 memcpy(req->cpu_table, rsstable, table_size);
1930 memcpy(req->hash, myhash, sizeof(myhash));
1931 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1932
1933 status = be_mbox_notify_wait(adapter);
1934
1935 mutex_unlock(&adapter->mbox_lock);
1936 return status;
1937 }
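/* Illustrative encoding (not part of the original file): a 128-entry RSS
 * indirection table is programmed as
 * cpu_table_size_log2 = fls(128) - 1 = 7.
 */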
1938
1939 /* Uses sync mcc */
1940 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1941 u8 bcn, u8 sts, u8 state)
1942 {
1943 struct be_mcc_wrb *wrb;
1944 struct be_cmd_req_enable_disable_beacon *req;
1945 int status;
1946
1947 spin_lock_bh(&adapter->mcc_lock);
1948
1949 wrb = wrb_from_mccq(adapter);
1950 if (!wrb) {
1951 status = -EBUSY;
1952 goto err;
1953 }
1954 req = embedded_payload(wrb);
1955
1956 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1957 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1958
1959 req->port_num = port_num;
1960 req->beacon_state = state;
1961 req->beacon_duration = bcn;
1962 req->status_duration = sts;
1963
1964 status = be_mcc_notify_wait(adapter);
1965
1966 err:
1967 spin_unlock_bh(&adapter->mcc_lock);
1968 return status;
1969 }
1970
1971 /* Uses sync mcc */
1972 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1973 {
1974 struct be_mcc_wrb *wrb;
1975 struct be_cmd_req_get_beacon_state *req;
1976 int status;
1977
1978 spin_lock_bh(&adapter->mcc_lock);
1979
1980 wrb = wrb_from_mccq(adapter);
1981 if (!wrb) {
1982 status = -EBUSY;
1983 goto err;
1984 }
1985 req = embedded_payload(wrb);
1986
1987 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1988 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
1989
1990 req->port_num = port_num;
1991
1992 status = be_mcc_notify_wait(adapter);
1993 if (!status) {
1994 struct be_cmd_resp_get_beacon_state *resp =
1995 embedded_payload(wrb);
1996 *state = resp->beacon_state;
1997 }
1998
1999 err:
2000 spin_unlock_bh(&adapter->mcc_lock);
2001 return status;
2002 }
2003
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			    u32 data_size, u32 data_offset,
			    const char *obj_name, u32 *data_written,
			    u8 *change_status, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_OBJECT,
			       sizeof(struct lancer_cmd_req_write_object), wrb,
			       NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
		      write_length, ctxt, data_size);

	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
		      eof, ctxt, data_size == 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma +
				sizeof(struct lancer_cmd_req_write_object))
				& 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
					 msecs_to_jiffies(30000)))
		status = -1;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
	} else {
		*addn_status = resp->additional_status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			   u32 data_size, u32 data_offset, const char *obj_name,
			   u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_OBJECT,
			       sizeof(struct lancer_cmd_req_read_object), wrb,
			       NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

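/* Writes a f/w flash region on BE2/BE3 via a non-embedded DMA buffer.
 * As with lancer_cmd_write_object(), completion is awaited on
 * adapter->flash_compl (40s timeout) instead of the usual synchronous
 * MCC wait.
 */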
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			  u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
					 msecs_to_jiffies(40000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

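/* Reads the 4-byte CRC stored at the given offset of the redboot flash
 * region; the flashing path can compare it against the image CRC to skip
 * an unchanged section.
 */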
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_read_flash_crc *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
			       wrb, NULL);

	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->crc, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

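/* Programs the MAC address on which the f/w should match magic-packet
 * wake-on-LAN frames.
 */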
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
		nonemb_cmd);
	memcpy(req->magic_mac, mac, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
			NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

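/* Runs the f/w loopback test: num_pkts packets of pkt_size bytes carrying
 * the 64-bit pattern are looped on port_num; the overall result comes back
 * in the response status field.
 */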
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
			 u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

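/* DDR DMA test: the 8-byte pattern is replicated byte-by-byte, LSB first,
 * across snd_buff, DMA'd to the adapter and echoed back into rcv_buff.
 * E.g. pattern 0x1122334455667788 produces the byte sequence 0x88 0x77
 * 0x66 0x55 0x44 0x33 0x22 0x11, repeating. Any mismatch, or snd_err in
 * the response, fails the test.
 */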
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++)
		req->snd_buff[i] = (u8)(pattern >> ((i % 8) * 8));

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
		    resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_seeprom_data(struct be_adapter *adapter,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
			nonemb_cmd);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_phy_info(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_dma_mem cmd;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
				      &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
			       wrb, &cmd);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_phy_info *resp_phy_info =
				cmd.va + sizeof(struct be_cmd_req_hdr);
		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
		adapter->phy.interface_type =
			le16_to_cpu(resp_phy_info->interface_type);
		adapter->phy.auto_speeds_supported =
			le16_to_cpu(resp_phy_info->auto_speeds_supported);
		adapter->phy.fixed_speeds_supported =
			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
		adapter->phy.misc_params =
			le32_to_cpu(resp_phy_info->misc_params);
	}
	pci_free_consistent(adapter->pdev, cmd.size,
			    cmd.va, cmd.dma);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

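/* Sets the max NIC transmit rate for the given domain (typically a VF);
 * the units of the rate value are whatever the f/w expects for
 * max_bps_nic.
 */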
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

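/* Reads the controller attributes over the mbox into a DMA buffer;
 * only the physical port number is currently extracted from the response.
 */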
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
					      &attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		return -ENOMEM;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
		pci_free_consistent(adapter->pdev, attribs_cmd.size,
				    attribs_cmd.va, attribs_cmd.dma);
		return -1;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
			&attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
			    attribs_cmd.dma);
	return status;
}

/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
		if (!adapter->be3_native)
			dev_warn(&adapter->pdev->dev,
				 "adapter not in advanced mode\n");
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Get privilege(s) for a function */
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fn_privileges *resp =
						embedded_payload(wrb);
		*privilege = le32_to_cpu(resp->privilege_mask);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

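/* Retrieves a MAC from the f/w MAC list: the first active mac_id found is
 * returned through pmac_id, else the first permanent MAC address is copied
 * into mac.
 */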
/* Uses synchronous MCCQ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			     bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
						   get_mac_list_cmd.size,
						   &get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
			       wrb, &get_mac_list_cmd);

	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	req->perm_override = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;
		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* The mac list returned could contain one or more active
		 * mac_ids, or one or more true or pseudo permanent mac
		 * addresses. If an active mac_id is present, return the
		 * first active mac_id found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_active = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id was found, return the first mac addr */
		*pmac_id_active = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
		       ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_mac_list *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
				    &cmd.dma, GFP_KERNEL);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
			       wrb, &cmd);

	req->hdr.domain = domain;
	req->mac_count = mac_count;
	if (mac_count)
		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

	status = be_mcc_notify_wait(adapter);

err:
	dma_free_coherent(&adapter->pdev->dev, cmd.size,
			  cmd.va, cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

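/* Configures hypervisor-switch settings for an interface; a non-zero pvid
 * installs a port VLAN id on it.
 */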
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
			  u32 domain, u16 intf_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_hsw_config *req;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
	if (pvid) {
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
	}

	be_dws_cpu_to_le(req->context, sizeof(req->context));
	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			  u32 domain, u16 intf_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
		      intf_id);
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);
		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
				    pvid, &resp->context);
		*pvid = le16_to_cpu(vid);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status;
	int payload_len = sizeof(*req);
	struct be_dma_mem cmd;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
				      &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		return -ENOMEM;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
		return -1;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       payload_len, wrb, &cmd);

	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;

		/* The command could succeed misleadingly on old f/w
		 * that is not aware of the V1 version; fake an error.
		 */
		if (resp->hdr.response_length < payload_len) {
			status = -1;
			goto err;
		}
		adapter->wol_cap = resp->wol_settings;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_ext_fat_caps *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);
	req->parameter_type = cpu_to_le32(1);

	status = be_mbox_notify_wait(adapter);
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd,
				   struct be_fat_conf_params *configs)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ext_fat_caps *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_port_name *req;
	int status;

	if (!lancer_chip(adapter)) {
		*port_name = adapter->hba_port_num + '0';
		return 0;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
			       NULL);
	req->hdr.version = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
		*port_name = resp->port_name[adapter->hba_port_num];
	} else {
		*port_name = adapter->hba_port_num + '0';
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

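/* Walks the resource-descriptor array returned by the GET_FUNC_CONFIG and
 * GET_PROFILE_CONFIG commands looking for the NIC descriptor; returns NULL
 * if it is absent or would run past the end of the response buffer.
 */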
static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
						    u32 max_buf_size)
{
	struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
	int i;

	for (i = 0; i < desc_count; i++) {
		desc->desc_len = RESOURCE_DESC_SIZE;
		if (((void *)desc + desc->desc_len) >
		    (void *)(buf + max_buf_size)) {
			desc = NULL;
			break;
		}

		if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
			break;

		desc = (void *)desc + desc->desc_len;
	}

	if (!desc || i == MAX_RESOURCE_DESC)
		return NULL;

	return desc;
}

/* Uses Mbox */
int be_cmd_get_func_config(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_func_config *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
				      &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		return -ENOMEM;
	}
	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
		return -1;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FUNC_CONFIG,
			       cmd.size, wrb, &cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_func_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_resource_desc *desc;

		desc = be_get_nic_desc(resp->func_param, desc_count,
				       sizeof(resp->func_param));
		if (!desc) {
			status = -EINVAL;
			goto err;
		}

		adapter->pf_number = desc->pf_num;
		adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
		adapter->max_vlans = le16_to_cpu(desc->vlan_count);
		adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
		adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
		adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
		adapter->max_rx_queues = le16_to_cpu(desc->rq_count);

		adapter->max_event_queues = le16_to_cpu(desc->eq_count);
		adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	pci_free_consistent(adapter->pdev, cmd.size,
			    cmd.va, cmd.dma);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
			      u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_profile_config *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
				      &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PROFILE_CONFIG,
			       cmd.size, wrb, &cmd);

	req->type = ACTIVE_PROFILE_TYPE;
	req->hdr.domain = domain;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_profile_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_resource_desc *desc;

		desc = be_get_nic_desc(resp->func_param, desc_count,
				       sizeof(resp->func_param));
		if (!desc) {
			status = -EINVAL;
			goto err;
		}
		*cap_flags = le32_to_cpu(desc->cap_flags);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, cmd.size,
			    cmd.va, cmd.dma);
	return status;
}

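/* Updates only the bandwidth fields of the NIC resource descriptor; every
 * other field is set to all-ones, which the f/w appears to treat as
 * "leave unchanged".
 */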
/* Uses sync mcc */
int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
			      u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_profile_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->desc_count = cpu_to_le32(1);

	req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
	req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
	req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
	req->nic_desc.pf_num = adapter->pf_number;
	req->nic_desc.vf_num = domain;

	/* Mark fields invalid */
	req->nic_desc.unicast_mac_count = 0xFFFF;
	req->nic_desc.mcc_count = 0xFFFF;
	req->nic_desc.vlan_count = 0xFFFF;
	req->nic_desc.mcast_mac_count = 0xFFFF;
	req->nic_desc.txq_count = 0xFFFF;
	req->nic_desc.rq_count = 0xFFFF;
	req->nic_desc.rssq_count = 0xFFFF;
	req->nic_desc.lro_count = 0xFFFF;
	req->nic_desc.cq_count = 0xFFFF;
	req->nic_desc.toe_conn_count = 0xFFFF;
	req->nic_desc.eq_count = 0xFFFF;
	req->nic_desc.link_param = 0xFF;
	req->nic_desc.acpi_params = 0xFF;
	req->nic_desc.wol_param = 0x0F;

	/* Change BW */
	req->nic_desc.bw_min = cpu_to_le32(bps);
	req->nic_desc.bw_max = cpu_to_le32(bps);
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (!lancer_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

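/* Passthrough used by the RoCE driver: the caller-supplied WRB payload is
 * copied into an embedded WRB, executed synchronously, and the response
 * (header included) is copied back into the caller's buffer.
 */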
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);