be2net: fix access to SEMAPHORE reg
drivers/net/ethernet/emulex/benet/be_cmds.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
			   u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

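/* The MCC tag fields are opaque to the hardware: be_wrb_cmd_hdr_prepare()
 * stores the request header's virtual address in tag0 (low 32 bits) and
 * tag1 (high 32 bits); the helper below reassembles it. The double 16-bit
 * shift avoids an undefined full-width shift when unsigned long is only
 * 32 bits wide.
 */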
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
		    compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* Ignore physical link event */
	if (lancer_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
	else
		adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
					(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
					compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout	120000 /* 12s timeout */
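	/* 120000 polls x udelay(100) per iteration = the 12 s budget above */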
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = resp->status;
out:
	return status;
}

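/* Poll the mailbox doorbell's ready bit. An all-ones read (0xffffffff)
 * almost certainly means the device has dropped off the PCI bus (e.g. a
 * surprise removal or an EEH reset), so bail out immediately.
 */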
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
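/* Address split implied by the shifts below: the high write sets
 * MPU_MAILBOX_DB_HI_MASK and places mbox dma bits 34-63 at doorbell bits
 * 2-31; the low write places bits 4-33 there. Bits 0-3 of the mailbox
 * address are assumed zero (16-byte alignment).
 */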
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

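/* The POST stage is read from the SLIPORT_SEMAPHORE register, which sits
 * at different PCI config-space offsets on Skyhawk vs. BE chips; hence the
 * per-chip offset selection below.
 */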
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;
	u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
					  SLIPORT_SEMAPHORE_OFFSET_BE;

	pci_read_config_dword(adapter->pdev, reg, &sem);
	*stage = sem & POST_STAGE_MASK;

	if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static bool lancer_provisioning_error(struct be_adapter *adapter)
{
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
		sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);

		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
			return true;
	}
	return false;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -1;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
					   SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	/* Stop error recovery if the error is not recoverable.
	 * A no-resource error is temporary and will go away once the PF
	 * provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (status == -1 && !resource_error)
		adapter->eeh_error = true;

	return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared.
 * mem will be NULL for embedded commands.
 */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len,
				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;
	unsigned long addr = (unsigned long)req_hdr;
	u64 req_addr = addr;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;

	wrb->tag0 = req_addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(req_addr);

	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

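/* Scatter a physically contiguous DMA buffer into 4K "pages" for the
 * firmware: e.g. a 16K ring yields four page entries. buf_pages is
 * clamped to max_pages, the most the request can carry.
 */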
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
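/* Worked example of the conversion below: usec_delay = 96 gives
 * interrupt_rate = 1000000 / 96 = 10416, so multiplier =
 * ((651042 - 10416) * 10 / 10416 + 5) / 10 = (615 + 5) / 10 = 62.
 */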
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
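/* The eight magic bytes written below (FF 12 34 FF FF 56 78 FF) appear to
 * be the fixed init signature the firmware matches; be_cmd_fw_clean()
 * writes the same pattern with AA/BB/CC/DD in the middle positions.
 */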
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte EQE */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

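/* Ring lengths are encoded for the hw as log2(len) + 1, i.e. fls() of a
 * power-of-two length: a 256-entry ring encodes as 9. An encoding of 16
 * (a 32K-entry ring) is represented as 0.
 */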
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								ctxt, 1);

	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
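	/* 0x00000022 = (1 << 1) | (1 << 5), matching the two event codes
	 * named in the comment above.
	 */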
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_mccq_org_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && !lancer_chip(adapter)) {
		dev_warn(&adapter->pdev->dev,
			 "Upgrade to F/W ver 2.102.235.0 or newer to avoid conflicting priorities between NIC and FCoE traffic\n");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);

	req->pmac_invalid = true;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside
 * the WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

	/* version 1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		hdr->version = 1;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	}
	return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported by all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses async mcc; the completion handler fills in the die temperature */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
		wrb, NULL);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

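/* Retrieves the firmware's FAT log into 'buf', pulling it in 60KB chunks;
 * the chunk size is presumably the largest payload a single non-embedded
 * MCC request may carry.
 */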
void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
	    log_offset = sizeof(u32), payload_len;
	int status;

	if (buf_len == 0)
		return;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
					      get_fat_cmd.size,
					      &get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while retrieving FAT data\n");
		return;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
				&get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			    get_fat_cmd.va,
			    get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
}

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
		      char *fw_on_flash)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strcpy(fw_ver, resp->firmware_version_string);
		if (fw_on_flash)
			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to a specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
		       req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

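/* Programs the RX filter. The request is a mask/value pair: bits set in
 * if_flags_mask select which interface flags to change and if_flags gives
 * their new values, so a bit set in the mask but clear in the flags turns
 * that mode off.
 */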
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
				wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS);
		if (value == ON)
			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* If mcast promisc mode was set earlier, reset it by
		 * setting the mask bit without the corresponding flags bit
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    adapter->if_cap_flags);

		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
			u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter is in a non-recoverable error state\n");
		}
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

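/* Uses mbox. The 40-byte key below is a fixed, hardcoded RSS hash key
 * (10 u32s); presumably it feeds the usual Toeplitz-style hash in hardware.
 */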
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
			0x3ea83c02, 0x4a110304};
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);

	if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
		req->hdr.version = 1;
		req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
					       RSS_ENABLE_UDP_IPV6);
	}

	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			    u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

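/* Writes a flash object on Lancer. The data to write is expected to sit
 * right after the request header in the same DMA buffer, which is why
 * addr_low/addr_high below point at cmd->dma plus the request size.
 */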
2000 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2001 u32 data_size, u32 data_offset,
2002 const char *obj_name, u32 *data_written,
2003 u8 *change_status, u8 *addn_status)
2004 {
2005 struct be_mcc_wrb *wrb;
2006 struct lancer_cmd_req_write_object *req;
2007 struct lancer_cmd_resp_write_object *resp;
2008 void *ctxt = NULL;
2009 int status;
2010
2011 spin_lock_bh(&adapter->mcc_lock);
2012 adapter->flash_status = 0;
2013
2014 wrb = wrb_from_mccq(adapter);
2015 if (!wrb) {
2016 status = -EBUSY;
2017 goto err_unlock;
2018 }
2019
2020 req = embedded_payload(wrb);
2021
2022 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2023 OPCODE_COMMON_WRITE_OBJECT,
2024 sizeof(struct lancer_cmd_req_write_object), wrb,
2025 NULL);
2026
2027 ctxt = &req->context;
2028 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2029 write_length, ctxt, data_size);
2030
2031 if (data_size == 0)
2032 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2033 eof, ctxt, 1);
2034 else
2035 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2036 eof, ctxt, 0);
2037
2038 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2039 req->write_offset = cpu_to_le32(data_offset);
2040 strcpy(req->object_name, obj_name);
2041 req->descriptor_count = cpu_to_le32(1);
2042 req->buf_len = cpu_to_le32(data_size);
2043 req->addr_low = cpu_to_le32((cmd->dma +
2044 sizeof(struct lancer_cmd_req_write_object))
2045 & 0xFFFFFFFF);
2046 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2047 sizeof(struct lancer_cmd_req_write_object)));
2048
2049 be_mcc_notify(adapter);
2050 spin_unlock_bh(&adapter->mcc_lock);
2051
2052 if (!wait_for_completion_timeout(&adapter->flash_compl,
2053 msecs_to_jiffies(30000)))
2054 status = -ETIMEDOUT;
2055 else
2056 status = adapter->flash_status;
2057
2058 resp = embedded_payload(wrb);
2059 if (!status) {
2060 *data_written = le32_to_cpu(resp->actual_write_len);
2061 *change_status = resp->change_status;
2062 } else {
2063 *addn_status = resp->additional_status;
2064 }
2065
2066 return status;
2067
2068 err_unlock:
2069 spin_unlock_bh(&adapter->mcc_lock);
2070 return status;
2071 }
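/*
 * Editor's note: unlike most commands in this file, the object write is
 * posted with be_mcc_notify() and completed via adapter->flash_compl,
 * because a flash burn can take tens of seconds. A hedged sketch of a
 * caller (the 32KB chunk size and "/prg/" object name are assumptions
 * modelled on the firmware-download path):
 *
 *	u32 offset = 0, written = 0;
 *	u8 change, add_status;
 *
 *	while (size) {
 *		u32 chunk = min_t(u32, size, 32 * 1024);
 *
 *		status = lancer_cmd_write_object(adapter, &flash_cmd,
 *						 chunk, offset, "/prg/",
 *						 &written, &change,
 *						 &add_status);
 *		if (status)
 *			break;
 *		offset += written;
 *		size -= written;
 *	}
 *
 * A final zero-length write sets the EOF bit in the context to commit
 * the image.
 */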
2072
2073 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2074 u32 data_size, u32 data_offset, const char *obj_name,
2075 u32 *data_read, u32 *eof, u8 *addn_status)
2076 {
2077 struct be_mcc_wrb *wrb;
2078 struct lancer_cmd_req_read_object *req;
2079 struct lancer_cmd_resp_read_object *resp;
2080 int status;
2081
2082 spin_lock_bh(&adapter->mcc_lock);
2083
2084 wrb = wrb_from_mccq(adapter);
2085 if (!wrb) {
2086 status = -EBUSY;
2087 goto err_unlock;
2088 }
2089
2090 req = embedded_payload(wrb);
2091
2092 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2093 OPCODE_COMMON_READ_OBJECT,
2094 sizeof(struct lancer_cmd_req_read_object), wrb,
2095 NULL);
2096
2097 req->desired_read_len = cpu_to_le32(data_size);
2098 req->read_offset = cpu_to_le32(data_offset);
2099 strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2100 req->descriptor_count = cpu_to_le32(1);
2101 req->buf_len = cpu_to_le32(data_size);
2102 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2103 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2104
2105 status = be_mcc_notify_wait(adapter);
2106
2107 resp = embedded_payload(wrb);
2108 if (!status) {
2109 *data_read = le32_to_cpu(resp->actual_read_len);
2110 *eof = le32_to_cpu(resp->eof);
2111 } else {
2112 *addn_status = resp->additional_status;
2113 }
2114
2115 err_unlock:
2116 spin_unlock_bh(&adapter->mcc_lock);
2117 return status;
2118 }
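/*
 * Editor's sketch (names assumed): read callers are expected to loop,
 * advancing the offset by the bytes actually returned, until the
 * firmware reports EOF:
 *
 *	u32 offset = 0, read = 0, eof = 0;
 *	u8 add_status;
 *
 *	do {
 *		status = lancer_cmd_read_object(adapter, &cmd, cmd.size,
 *						offset, obj_name, &read,
 *						&eof, &add_status);
 *		offset += read;
 *	} while (!status && !eof);
 */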
2119
2120 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2121 u32 flash_type, u32 flash_opcode, u32 buf_size)
2122 {
2123 struct be_mcc_wrb *wrb;
2124 struct be_cmd_write_flashrom *req;
2125 int status;
2126
2127 spin_lock_bh(&adapter->mcc_lock);
2128 adapter->flash_status = 0;
2129
2130 wrb = wrb_from_mccq(adapter);
2131 if (!wrb) {
2132 status = -EBUSY;
2133 goto err_unlock;
2134 }
2135 req = cmd->va;
2136
2137 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2138 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2139
2140 req->params.op_type = cpu_to_le32(flash_type);
2141 req->params.op_code = cpu_to_le32(flash_opcode);
2142 req->params.data_buf_size = cpu_to_le32(buf_size);
2143
2144 be_mcc_notify(adapter);
2145 spin_unlock_bh(&adapter->mcc_lock);
2146
2147 if (!wait_for_completion_timeout(&adapter->flash_compl,
2148 msecs_to_jiffies(40000)))
2149 status = -ETIMEDOUT;
2150 else
2151 status = adapter->flash_status;
2152
2153 return status;
2154
2155 err_unlock:
2156 spin_unlock_bh(&adapter->mcc_lock);
2157 return status;
2158 }
2159
2160 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2161 int offset)
2162 {
2163 struct be_mcc_wrb *wrb;
2164 struct be_cmd_read_flash_crc *req;
2165 int status;
2166
2167 spin_lock_bh(&adapter->mcc_lock);
2168
2169 wrb = wrb_from_mccq(adapter);
2170 if (!wrb) {
2171 status = -EBUSY;
2172 goto err;
2173 }
2174 req = embedded_payload(wrb);
2175
2176 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2177 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2178 wrb, NULL);
2179
2180 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2181 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2182 req->params.offset = cpu_to_le32(offset);
2183 req->params.data_buf_size = cpu_to_le32(0x4);
2184
2185 status = be_mcc_notify_wait(adapter);
2186 if (!status)
2187 memcpy(flashed_crc, req->crc, 4);
2188
2189 err:
2190 spin_unlock_bh(&adapter->mcc_lock);
2191 return status;
2192 }
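/*
 * Editor's sketch (hypothetical caller): the redboot region is only
 * reflashed when the CRC stored in its last 4 bytes differs from the
 * new image's, roughly:
 *
 *	u8 flashed_crc[4];
 *
 *	status = be_cmd_get_flash_crc(adapter, flashed_crc, crc_offset);
 *	if (!status && !memcmp(flashed_crc, img + img_size - 4, 4))
 *		return false;	// image unchanged, skip flashing
 */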
2193
2194 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2195 struct be_dma_mem *nonemb_cmd)
2196 {
2197 struct be_mcc_wrb *wrb;
2198 struct be_cmd_req_acpi_wol_magic_config *req;
2199 int status;
2200
2201 spin_lock_bh(&adapter->mcc_lock);
2202
2203 wrb = wrb_from_mccq(adapter);
2204 if (!wrb) {
2205 status = -EBUSY;
2206 goto err;
2207 }
2208 req = nonemb_cmd->va;
2209
2210 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2211 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2212 nonemb_cmd);
2213 memcpy(req->magic_mac, mac, ETH_ALEN);
2214
2215 status = be_mcc_notify_wait(adapter);
2216
2217 err:
2218 spin_unlock_bh(&adapter->mcc_lock);
2219 return status;
2220 }
2221
2222 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2223 u8 loopback_type, u8 enable)
2224 {
2225 struct be_mcc_wrb *wrb;
2226 struct be_cmd_req_set_lmode *req;
2227 int status;
2228
2229 spin_lock_bh(&adapter->mcc_lock);
2230
2231 wrb = wrb_from_mccq(adapter);
2232 if (!wrb) {
2233 status = -EBUSY;
2234 goto err;
2235 }
2236
2237 req = embedded_payload(wrb);
2238
2239 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2240 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2241 NULL);
2242
2243 req->src_port = port_num;
2244 req->dest_port = port_num;
2245 req->loopback_type = loopback_type;
2246 req->loopback_state = enable;
2247
2248 status = be_mcc_notify_wait(adapter);
2249 err:
2250 spin_unlock_bh(&adapter->mcc_lock);
2251 return status;
2252 }
2253
2254 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2255 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2256 {
2257 struct be_mcc_wrb *wrb;
2258 struct be_cmd_req_loopback_test *req;
2259 int status;
2260
2261 spin_lock_bh(&adapter->mcc_lock);
2262
2263 wrb = wrb_from_mccq(adapter);
2264 if (!wrb) {
2265 status = -EBUSY;
2266 goto err;
2267 }
2268
2269 req = embedded_payload(wrb);
2270
2271 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2272 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2273 req->hdr.timeout = cpu_to_le32(4);
2274
2275 req->pattern = cpu_to_le64(pattern);
2276 req->src_port = cpu_to_le32(port_num);
2277 req->dest_port = cpu_to_le32(port_num);
2278 req->pkt_size = cpu_to_le32(pkt_size);
2279 req->num_pkts = cpu_to_le32(num_pkts);
2280 req->loopback_type = cpu_to_le32(loopback_type);
2281
2282 status = be_mcc_notify_wait(adapter);
2283 if (!status) {
2284 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2285 status = le32_to_cpu(resp->status);
2286 }
2287
2288 err:
2289 spin_unlock_bh(&adapter->mcc_lock);
2290 return status;
2291 }
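/*
 * Editor's sketch (the loopback-mode constant and test parameters are
 * assumptions from the ethtool self-test path): a test run brackets
 * be_cmd_loopback_test() with enable/disable calls:
 *
 *	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 *			    BE_MAC_LOOPBACK, 1);
 *	status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 *				      BE_MAC_LOOPBACK, 1500, 2,
 *				      0xABCDABCDABCDABCDULL);
 *	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 *			    BE_MAC_LOOPBACK, 0);
 */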
2292
2293 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2294 u32 byte_cnt, struct be_dma_mem *cmd)
2295 {
2296 struct be_mcc_wrb *wrb;
2297 struct be_cmd_req_ddrdma_test *req;
2298 int status;
2299 int i, j = 0;
2300
2301 spin_lock_bh(&adapter->mcc_lock);
2302
2303 wrb = wrb_from_mccq(adapter);
2304 if (!wrb) {
2305 status = -EBUSY;
2306 goto err;
2307 }
2308 req = cmd->va;
2309 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2310 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2311
2312 req->pattern = cpu_to_le64(pattern);
2313 req->byte_count = cpu_to_le32(byte_cnt);
2314 for (i = 0; i < byte_cnt; i++) {
2315 req->snd_buff[i] = (u8)(pattern >> (j*8));
2316 j++;
2317 if (j > 7)
2318 j = 0;
2319 }
2320
2321 status = be_mcc_notify_wait(adapter);
2322
2323 if (!status) {
2324 struct be_cmd_resp_ddrdma_test *resp;
2325 resp = cmd->va;
2326 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2327 resp->snd_err) {
2328 status = -EIO;
2329 }
2330 }
2331
2332 err:
2333 spin_unlock_bh(&adapter->mcc_lock);
2334 return status;
2335 }
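/*
 * Editor's note: the fill loop above replicates the 64-bit pattern
 * LSB-first, so pattern 0x1122334455667788ULL produces the byte
 * sequence 88 77 66 55 44 33 22 11 repeating every 8 bytes; the test
 * fails if the firmware's echo in rcv_buff differs.
 */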
2336
2337 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2338 struct be_dma_mem *nonemb_cmd)
2339 {
2340 struct be_mcc_wrb *wrb;
2341 struct be_cmd_req_seeprom_read *req;
2342 struct be_sge *sge;
2343 int status;
2344
2345 spin_lock_bh(&adapter->mcc_lock);
2346
2347 wrb = wrb_from_mccq(adapter);
2348 if (!wrb) {
2349 status = -EBUSY;
2350 goto err;
2351 }
2352 req = nonemb_cmd->va;
2353 sge = nonembedded_sgl(wrb);
2354
2355 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2356 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2357 nonemb_cmd);
2358
2359 status = be_mcc_notify_wait(adapter);
2360
2361 err:
2362 spin_unlock_bh(&adapter->mcc_lock);
2363 return status;
2364 }
2365
2366 int be_cmd_get_phy_info(struct be_adapter *adapter)
2367 {
2368 struct be_mcc_wrb *wrb;
2369 struct be_cmd_req_get_phy_info *req;
2370 struct be_dma_mem cmd;
2371 int status;
2372
2373 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2374 CMD_SUBSYSTEM_COMMON))
2375 return -EPERM;
2376
2377 spin_lock_bh(&adapter->mcc_lock);
2378
2379 wrb = wrb_from_mccq(adapter);
2380 if (!wrb) {
2381 status = -EBUSY;
2382 goto err;
2383 }
2384 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2385 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2386 &cmd.dma);
2387 if (!cmd.va) {
2388 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2389 status = -ENOMEM;
2390 goto err;
2391 }
2392
2393 req = cmd.va;
2394
2395 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2396 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2397 wrb, &cmd);
2398
2399 status = be_mcc_notify_wait(adapter);
2400 if (!status) {
2401 struct be_phy_info *resp_phy_info =
2402 cmd.va + sizeof(struct be_cmd_req_hdr);
2403 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2404 adapter->phy.interface_type =
2405 le16_to_cpu(resp_phy_info->interface_type);
2406 adapter->phy.auto_speeds_supported =
2407 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2408 adapter->phy.fixed_speeds_supported =
2409 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2410 adapter->phy.misc_params =
2411 le32_to_cpu(resp_phy_info->misc_params);
2412 }
2413 pci_free_consistent(adapter->pdev, cmd.size,
2414 cmd.va, cmd.dma);
2415 err:
2416 spin_unlock_bh(&adapter->mcc_lock);
2417 return status;
2418 }
2419
2420 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2421 {
2422 struct be_mcc_wrb *wrb;
2423 struct be_cmd_req_set_qos *req;
2424 int status;
2425
2426 spin_lock_bh(&adapter->mcc_lock);
2427
2428 wrb = wrb_from_mccq(adapter);
2429 if (!wrb) {
2430 status = -EBUSY;
2431 goto err;
2432 }
2433
2434 req = embedded_payload(wrb);
2435
2436 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2437 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2438
2439 req->hdr.domain = domain;
2440 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2441 req->max_bps_nic = cpu_to_le32(bps);
2442
2443 status = be_mcc_notify_wait(adapter);
2444
2445 err:
2446 spin_unlock_bh(&adapter->mcc_lock);
2447 return status;
2448 }
2449
2450 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2451 {
2452 struct be_mcc_wrb *wrb;
2453 struct be_cmd_req_cntl_attribs *req;
2454 struct be_cmd_resp_cntl_attribs *resp;
2455 int status;
2456 int payload_len = max(sizeof(*req), sizeof(*resp));
2457 struct mgmt_controller_attrib *attribs;
2458 struct be_dma_mem attribs_cmd;
2459
2460 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2461 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2462 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2463 &attribs_cmd.dma);
2464 if (!attribs_cmd.va) {
2465 dev_err(&adapter->pdev->dev,
2466 "Memory allocation failure\n");
2467 return -ENOMEM;
2468 }
2469
2470 if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2471 status = -EINTR;
2472 goto free_mem;
2473 }
2472
2473 wrb = wrb_from_mbox(adapter);
2474 if (!wrb) {
2475 status = -EBUSY;
2476 goto err;
2477 }
2478 req = attribs_cmd.va;
2479
2480 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2481 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2482 &attribs_cmd);
2483
2484 status = be_mbox_notify_wait(adapter);
2485 if (!status) {
2486 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2487 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2488 }
2489
2490 err:
2491 mutex_unlock(&adapter->mbox_lock);
2492 free_mem:
2493 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2494 attribs_cmd.dma);
2494 return status;
2495 }
2496
2497 /* Uses mbox */
2498 int be_cmd_req_native_mode(struct be_adapter *adapter)
2499 {
2500 struct be_mcc_wrb *wrb;
2501 struct be_cmd_req_set_func_cap *req;
2502 int status;
2503
2504 if (mutex_lock_interruptible(&adapter->mbox_lock))
2505 return -1;
2506
2507 wrb = wrb_from_mbox(adapter);
2508 if (!wrb) {
2509 status = -EBUSY;
2510 goto err;
2511 }
2512
2513 req = embedded_payload(wrb);
2514
2515 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2516 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2517
2518 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2519 CAPABILITY_BE3_NATIVE_ERX_API);
2520 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2521
2522 status = be_mbox_notify_wait(adapter);
2523 if (!status) {
2524 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2525 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2526 CAPABILITY_BE3_NATIVE_ERX_API;
2527 if (!adapter->be3_native)
2528 dev_warn(&adapter->pdev->dev,
2529 "adapter not in advanced mode\n");
2530 }
2531 err:
2532 mutex_unlock(&adapter->mbox_lock);
2533 return status;
2534 }
2535
2536 /* Get privilege(s) for a function */
2537 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2538 u32 domain)
2539 {
2540 struct be_mcc_wrb *wrb;
2541 struct be_cmd_req_get_fn_privileges *req;
2542 int status;
2543
2544 spin_lock_bh(&adapter->mcc_lock);
2545
2546 wrb = wrb_from_mccq(adapter);
2547 if (!wrb) {
2548 status = -EBUSY;
2549 goto err;
2550 }
2551
2552 req = embedded_payload(wrb);
2553
2554 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2555 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2556 wrb, NULL);
2557
2558 req->hdr.domain = domain;
2559
2560 status = be_mcc_notify_wait(adapter);
2561 if (!status) {
2562 struct be_cmd_resp_get_fn_privileges *resp =
2563 embedded_payload(wrb);
2564 *privilege = le32_to_cpu(resp->privilege_mask);
2565 }
2566
2567 err:
2568 spin_unlock_bh(&adapter->mcc_lock);
2569 return status;
2570 }
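/*
 * Editor's sketch (assumed init-time usage): the returned mask is
 * cached so that be_cmd_allowed() can veto privileged opcodes later;
 * domain 0 queries the calling function itself:
 *
 *	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
 */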
2571
2572 /* Uses synchronous MCCQ */
2573 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2574 bool *pmac_id_active, u32 *pmac_id, u8 domain)
2575 {
2576 struct be_mcc_wrb *wrb;
2577 struct be_cmd_req_get_mac_list *req;
2578 int status;
2579 int mac_count;
2580 struct be_dma_mem get_mac_list_cmd;
2581 int i;
2582
2583 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2584 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2585 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2586 get_mac_list_cmd.size,
2587 &get_mac_list_cmd.dma);
2588
2589 if (!get_mac_list_cmd.va) {
2590 dev_err(&adapter->pdev->dev,
2591 "Memory allocation failure during GET_MAC_LIST\n");
2592 return -ENOMEM;
2593 }
2594
2595 spin_lock_bh(&adapter->mcc_lock);
2596
2597 wrb = wrb_from_mccq(adapter);
2598 if (!wrb) {
2599 status = -EBUSY;
2600 goto out;
2601 }
2602
2603 req = get_mac_list_cmd.va;
2604
2605 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2606 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2607 wrb, &get_mac_list_cmd);
2608
2609 req->hdr.domain = domain;
2610 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2611 req->perm_override = 1;
2612
2613 status = be_mcc_notify_wait(adapter);
2614 if (!status) {
2615 struct be_cmd_resp_get_mac_list *resp =
2616 get_mac_list_cmd.va;
2617 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2618 /* The returned mac list may contain one or more active mac_ids
2619 * and/or one or more true or pseudo permanent mac addresses.
2620 * If an active mac_id is present, return the first one found.
2621 */
2623 for (i = 0; i < mac_count; i++) {
2624 struct get_list_macaddr *mac_entry;
2625 u16 mac_addr_size;
2626 u32 mac_id;
2627
2628 mac_entry = &resp->macaddr_list[i];
2629 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2630 /* A 4-byte entry carries a 32-bit mac_id, while a 6-byte
2631 * entry carries a permanent mac address.
2632 */
2633 if (mac_addr_size == sizeof(u32)) {
2634 *pmac_id_active = true;
2635 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2636 *pmac_id = le32_to_cpu(mac_id);
2637 goto out;
2638 }
2639 }
2640 /* If no active mac_id found, return first mac addr */
2641 *pmac_id_active = false;
2642 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2643 ETH_ALEN);
2644 }
2645
2646 out:
2647 spin_unlock_bh(&adapter->mcc_lock);
2648 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2649 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2650 return status;
2651 }
2652
2653 /* Uses synchronous MCCQ */
2654 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2655 u8 mac_count, u32 domain)
2656 {
2657 struct be_mcc_wrb *wrb;
2658 struct be_cmd_req_set_mac_list *req;
2659 int status;
2660 struct be_dma_mem cmd;
2661
2662 memset(&cmd, 0, sizeof(struct be_dma_mem));
2663 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2664 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2665 &cmd.dma, GFP_KERNEL);
2666 if (!cmd.va) {
2667 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2668 return -ENOMEM;
2669 }
2670
2671 spin_lock_bh(&adapter->mcc_lock);
2672
2673 wrb = wrb_from_mccq(adapter);
2674 if (!wrb) {
2675 status = -EBUSY;
2676 goto err;
2677 }
2678
2679 req = cmd.va;
2680 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2681 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2682 wrb, &cmd);
2683
2684 req->hdr.domain = domain;
2685 req->mac_count = mac_count;
2686 if (mac_count)
2687 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2688
2689 status = be_mcc_notify_wait(adapter);
2690
2691 err:
2692 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2693 cmd.va, cmd.dma);
2694 spin_unlock_bh(&adapter->mcc_lock);
2695 return status;
2696 }
2697
2698 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2699 u32 domain, u16 intf_id)
2700 {
2701 struct be_mcc_wrb *wrb;
2702 struct be_cmd_req_set_hsw_config *req;
2703 void *ctxt;
2704 int status;
2705
2706 spin_lock_bh(&adapter->mcc_lock);
2707
2708 wrb = wrb_from_mccq(adapter);
2709 if (!wrb) {
2710 status = -EBUSY;
2711 goto err;
2712 }
2713
2714 req = embedded_payload(wrb);
2715 ctxt = &req->context;
2716
2717 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2718 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2719
2720 req->hdr.domain = domain;
2721 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2722 if (pvid) {
2723 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2724 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2725 }
2726
2727 be_dws_cpu_to_le(req->context, sizeof(req->context));
2728 status = be_mcc_notify_wait(adapter);
2729
2730 err:
2731 spin_unlock_bh(&adapter->mcc_lock);
2732 return status;
2733 }
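/*
 * Editor's sketch (hypothetical names, modelled on a VF port-VLAN
 * path): a non-zero pvid tags the VF's traffic, and the domain selects
 * the VF (vf + 1, with domain 0 being the PF itself):
 *
 *	status = be_cmd_set_hsw_config(adapter, vlan_tag, vf + 1,
 *				       vf_cfg->if_handle);
 */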
2734
2735 /* Get Hyper switch config */
2736 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2737 u32 domain, u16 intf_id)
2738 {
2739 struct be_mcc_wrb *wrb;
2740 struct be_cmd_req_get_hsw_config *req;
2741 void *ctxt;
2742 int status;
2743 u16 vid;
2744
2745 spin_lock_bh(&adapter->mcc_lock);
2746
2747 wrb = wrb_from_mccq(adapter);
2748 if (!wrb) {
2749 status = -EBUSY;
2750 goto err;
2751 }
2752
2753 req = embedded_payload(wrb);
2754 ctxt = &req->context;
2755
2756 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2757 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2758
2759 req->hdr.domain = domain;
2760 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2761 intf_id);
2762 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2763 be_dws_cpu_to_le(req->context, sizeof(req->context));
2764
2765 status = be_mcc_notify_wait(adapter);
2766 if (!status) {
2767 struct be_cmd_resp_get_hsw_config *resp =
2768 embedded_payload(wrb);
2769 be_dws_le_to_cpu(&resp->context,
2770 sizeof(resp->context));
2771 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2772 pvid, &resp->context);
2773 *pvid = le16_to_cpu(vid);
2774 }
2775
2776 err:
2777 spin_unlock_bh(&adapter->mcc_lock);
2778 return status;
2779 }
2780
2781 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2782 {
2783 struct be_mcc_wrb *wrb;
2784 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2785 int status;
2786 int payload_len = sizeof(*req);
2787 struct be_dma_mem cmd;
2788
2789 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2790 CMD_SUBSYSTEM_ETH))
2791 return -EPERM;
2792
2793 memset(&cmd, 0, sizeof(struct be_dma_mem));
2794 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2795 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2796 &cmd.dma);
2797 if (!cmd.va) {
2798 dev_err(&adapter->pdev->dev,
2799 "Memory allocation failure\n");
2800 return -ENOMEM;
2801 }
2802
2803 if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2804 status = -EINTR;
2805 goto free_mem;
2806 }
2805
2806 wrb = wrb_from_mbox(adapter);
2807 if (!wrb) {
2808 status = -EBUSY;
2809 goto err;
2810 }
2811
2812 req = cmd.va;
2813
2814 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2815 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2816 payload_len, wrb, &cmd);
2817
2818 req->hdr.version = 1;
2819 req->query_options = BE_GET_WOL_CAP;
2820
2821 status = be_mbox_notify_wait(adapter);
2822 if (!status) {
2823 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2824 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2825
2826 /* the command could succeed misleadingly on old f/w that is not
2827 * aware of the V1 request format; treat a short response as an error */
2828 if (resp->hdr.response_length < payload_len) {
2829 status = -EOPNOTSUPP;
2830 goto err;
2831 }
2832 adapter->wol_cap = resp->wol_settings;
2833 }
2834 err:
2835 mutex_unlock(&adapter->mbox_lock);
2836 free_mem:
2837 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2838 return status;
2839 }
2840
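/*
 * Editor's note (assumption about the callers): the cached
 * adapter->wol_cap is what the ethtool get/set WoL handlers are
 * expected to consult, so magic-packet wake is only advertised when
 * this query succeeds on V1-aware firmware.
 */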
2840 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2841 struct be_dma_mem *cmd)
2842 {
2843 struct be_mcc_wrb *wrb;
2844 struct be_cmd_req_get_ext_fat_caps *req;
2845 int status;
2846
2847 if (mutex_lock_interruptible(&adapter->mbox_lock))
2848 return -1;
2849
2850 wrb = wrb_from_mbox(adapter);
2851 if (!wrb) {
2852 status = -EBUSY;
2853 goto err;
2854 }
2855
2856 req = cmd->va;
2857 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2858 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2859 cmd->size, wrb, cmd);
2860 req->parameter_type = cpu_to_le32(1);
2861
2862 status = be_mbox_notify_wait(adapter);
2863 err:
2864 mutex_unlock(&adapter->mbox_lock);
2865 return status;
2866 }
2867
2868 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2869 struct be_dma_mem *cmd,
2870 struct be_fat_conf_params *configs)
2871 {
2872 struct be_mcc_wrb *wrb;
2873 struct be_cmd_req_set_ext_fat_caps *req;
2874 int status;
2875
2876 spin_lock_bh(&adapter->mcc_lock);
2877
2878 wrb = wrb_from_mccq(adapter);
2879 if (!wrb) {
2880 status = -EBUSY;
2881 goto err;
2882 }
2883
2884 req = cmd->va;
2885 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2886 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2887 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2888 cmd->size, wrb, cmd);
2889
2890 status = be_mcc_notify_wait(adapter);
2891 err:
2892 spin_unlock_bh(&adapter->mcc_lock);
2893 return status;
2894 }
2895
2896 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2897 {
2898 struct be_mcc_wrb *wrb;
2899 struct be_cmd_req_get_port_name *req;
2900 int status;
2901
2902 if (!lancer_chip(adapter)) {
2903 *port_name = adapter->hba_port_num + '0';
2904 return 0;
2905 }
2906
2907 spin_lock_bh(&adapter->mcc_lock);
2908
2909 wrb = wrb_from_mccq(adapter);
2910 if (!wrb) {
2911 status = -EBUSY;
2912 goto err;
2913 }
2914
2915 req = embedded_payload(wrb);
2916
2917 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2918 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2919 NULL);
2920 req->hdr.version = 1;
2921
2922 status = be_mcc_notify_wait(adapter);
2923 if (!status) {
2924 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2925 *port_name = resp->port_name[adapter->hba_port_num];
2926 } else {
2927 *port_name = adapter->hba_port_num + '0';
2928 }
2929 err:
2930 spin_unlock_bh(&adapter->mcc_lock);
2931 return status;
2932 }
2933
2934 static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2935 u32 max_buf_size)
2936 {
2937 struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
2938 int i;
2939
2940 for (i = 0; i < desc_count; i++) {
2941 desc->desc_len = RESOURCE_DESC_SIZE;
2942 if (((void *)desc + desc->desc_len) >
2943 (void *)(buf + max_buf_size)) {
2944 desc = NULL;
2945 break;
2946 }
2947
2948 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
2949 break;
2950
2951 desc = (void *)desc + desc->desc_len;
2952 }
2953
2954 if (!desc || i == desc_count)
2955 return NULL;
2956
2957 return desc;
2958 }
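/*
 * Editor's note: the walk above assumes fixed-stride descriptors (it
 * overwrites desc_len with RESOURCE_DESC_SIZE rather than trusting the
 * length reported by firmware) and returns the first descriptor of NIC
 * type, bounds-checked against max_buf_size.
 */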
2959
2960 /* Uses Mbox */
2961 int be_cmd_get_func_config(struct be_adapter *adapter)
2962 {
2963 struct be_mcc_wrb *wrb;
2964 struct be_cmd_req_get_func_config *req;
2965 int status;
2966 struct be_dma_mem cmd;
2967
2968 memset(&cmd, 0, sizeof(struct be_dma_mem));
2969 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2970 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2971 &cmd.dma);
2972 if (!cmd.va) {
2973 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2974 return -ENOMEM;
2975 }
2976 if (mutex_lock_interruptible(&adapter->mbox_lock)) {
2977 status = -EINTR;
2978 goto free_mem;
2979 }
2978
2979 wrb = wrb_from_mbox(adapter);
2980 if (!wrb) {
2981 status = -EBUSY;
2982 goto err;
2983 }
2984
2985 req = cmd.va;
2986
2987 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2988 OPCODE_COMMON_GET_FUNC_CONFIG,
2989 cmd.size, wrb, &cmd);
2990
2991 status = be_mbox_notify_wait(adapter);
2992 if (!status) {
2993 struct be_cmd_resp_get_func_config *resp = cmd.va;
2994 u32 desc_count = le32_to_cpu(resp->desc_count);
2995 struct be_nic_resource_desc *desc;
2996
2997 desc = be_get_nic_desc(resp->func_param, desc_count,
2998 sizeof(resp->func_param));
2999 if (!desc) {
3000 status = -EINVAL;
3001 goto err;
3002 }
3003
3004 adapter->pf_number = desc->pf_num;
3005 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3006 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3007 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3008 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3009 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3010 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3011
3012 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3013 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3014 }
3015 err:
3016 mutex_unlock(&adapter->mbox_lock);
3017 free_mem:
3017 pci_free_consistent(adapter->pdev, cmd.size,
3018 cmd.va, cmd.dma);
3019 return status;
3020 }
3021
3022 /* Uses sync mcc */
3023 int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3024 u8 domain)
3025 {
3026 struct be_mcc_wrb *wrb;
3027 struct be_cmd_req_get_profile_config *req;
3028 int status;
3029 struct be_dma_mem cmd;
3030
3031 memset(&cmd, 0, sizeof(struct be_dma_mem));
3032 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3033 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3034 &cmd.dma);
3035 if (!cmd.va) {
3036 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3037 return -ENOMEM;
3038 }
3039
3040 spin_lock_bh(&adapter->mcc_lock);
3041
3042 wrb = wrb_from_mccq(adapter);
3043 if (!wrb) {
3044 status = -EBUSY;
3045 goto err;
3046 }
3047
3048 req = cmd.va;
3049
3050 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3051 OPCODE_COMMON_GET_PROFILE_CONFIG,
3052 cmd.size, wrb, &cmd);
3053
3054 req->type = ACTIVE_PROFILE_TYPE;
3055 req->hdr.domain = domain;
3056
3057 status = be_mcc_notify_wait(adapter);
3058 if (!status) {
3059 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3060 u32 desc_count = le32_to_cpu(resp->desc_count);
3061 struct be_nic_resource_desc *desc;
3062
3063 desc = be_get_nic_desc(resp->func_param, desc_count,
3064 sizeof(resp->func_param));
3065
3066 if (!desc) {
3067 status = -EINVAL;
3068 goto err;
3069 }
3070 *cap_flags = le32_to_cpu(desc->cap_flags);
3071 }
3072 err:
3073 spin_unlock_bh(&adapter->mcc_lock);
3074 pci_free_consistent(adapter->pdev, cmd.size,
3075 cmd.va, cmd.dma);
3076 return status;
3077 }
3078
3079 /* Uses sync mcc */
3080 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3081 u8 domain)
3082 {
3083 struct be_mcc_wrb *wrb;
3084 struct be_cmd_req_set_profile_config *req;
3085 int status;
3086
3087 spin_lock_bh(&adapter->mcc_lock);
3088
3089 wrb = wrb_from_mccq(adapter);
3090 if (!wrb) {
3091 status = -EBUSY;
3092 goto err;
3093 }
3094
3095 req = embedded_payload(wrb);
3096
3097 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3098 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3099 wrb, NULL);
3100
3101 req->hdr.domain = domain;
3102 req->desc_count = cpu_to_le32(1);
3103
3104 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
3105 req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3106 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3107 req->nic_desc.pf_num = adapter->pf_number;
3108 req->nic_desc.vf_num = domain;
3109
3110 /* Mark fields invalid */
3111 req->nic_desc.unicast_mac_count = 0xFFFF;
3112 req->nic_desc.mcc_count = 0xFFFF;
3113 req->nic_desc.vlan_count = 0xFFFF;
3114 req->nic_desc.mcast_mac_count = 0xFFFF;
3115 req->nic_desc.txq_count = 0xFFFF;
3116 req->nic_desc.rq_count = 0xFFFF;
3117 req->nic_desc.rssq_count = 0xFFFF;
3118 req->nic_desc.lro_count = 0xFFFF;
3119 req->nic_desc.cq_count = 0xFFFF;
3120 req->nic_desc.toe_conn_count = 0xFFFF;
3121 req->nic_desc.eq_count = 0xFFFF;
3122 req->nic_desc.link_param = 0xFF;
3124 req->nic_desc.acpi_params = 0xFF;
3125 req->nic_desc.wol_param = 0x0F;
3126
3127 /* Change BW */
3128 req->nic_desc.bw_min = cpu_to_le32(bps);
3129 req->nic_desc.bw_max = cpu_to_le32(bps);
3130 status = be_mcc_notify_wait(adapter);
3131 err:
3132 spin_unlock_bh(&adapter->mcc_lock);
3133 return status;
3134 }
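/*
 * Editor's note: the all-ones values above act as "do not change"
 * sentinels, so this request updates only the bandwidth limits and
 * leaves every other field of the NIC descriptor as provisioned.
 */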
3135
3136 /* Uses sync mcc */
3137 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3138 {
3139 struct be_mcc_wrb *wrb;
3140 struct be_cmd_enable_disable_vf *req;
3141 int status;
3142
3143 if (!lancer_chip(adapter))
3144 return 0;
3145
3146 spin_lock_bh(&adapter->mcc_lock);
3147
3148 wrb = wrb_from_mccq(adapter);
3149 if (!wrb) {
3150 status = -EBUSY;
3151 goto err;
3152 }
3153
3154 req = embedded_payload(wrb);
3155
3156 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3157 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3158 wrb, NULL);
3159
3160 req->hdr.domain = domain;
3161 req->enable = 1;
3162 status = be_mcc_notify_wait(adapter);
3163 err:
3164 spin_unlock_bh(&adapter->mcc_lock);
3165 return status;
3166 }
3167
3168 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3169 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3170 {
3171 struct be_adapter *adapter = netdev_priv(netdev_handle);
3172 struct be_mcc_wrb *wrb;
3173 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3174 struct be_cmd_req_hdr *req;
3175 struct be_cmd_resp_hdr *resp;
3176 int status;
3177
3178 spin_lock_bh(&adapter->mcc_lock);
3179
3180 wrb = wrb_from_mccq(adapter);
3181 if (!wrb) {
3182 status = -EBUSY;
3183 goto err;
3184 }
3185 req = embedded_payload(wrb);
3186 resp = embedded_payload(wrb);
3187
3188 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3189 hdr->opcode, wrb_payload_size, wrb, NULL);
3190 memcpy(req, wrb_payload, wrb_payload_size);
3191 be_dws_cpu_to_le(req, wrb_payload_size);
3192
3193 status = be_mcc_notify_wait(adapter);
3194 if (cmd_status)
3195 *cmd_status = (status & 0xffff);
3196 if (ext_status)
3197 *ext_status = 0;
3198 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3199 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3200 err:
3201 spin_unlock_bh(&adapter->mcc_lock);
3202 return status;
3203 }
3204 EXPORT_SYMBOL(be_roce_mcc_cmd);