Commit | Line | Data |
---|---|---|
6b7c5b94 SP |
1 | /* |
2 | * Copyright (C) 2005 - 2009 ServerEngines | |
3 | * All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License version 2 | |
7 | * as published by the Free Software Foundation. The full GNU General | |
8 | * Public License is included in this distribution in the file called COPYING. | |
9 | * | |
10 | * Contact Information: | |
11 | * linux-drivers@serverengines.com | |
12 | * | |
13 | * ServerEngines | |
14 | * 209 N. Fair Oaks Ave | |
15 | * Sunnyvale, CA 94085 | |
16 | */ | |
17 | ||
18 | #include "be.h" | |
8788fdc2 | 19 | #include "be_cmds.h" |
6b7c5b94 | 20 | |
8788fdc2 | 21 | static void be_mcc_notify(struct be_adapter *adapter) |
5fb379ee | 22 | { |
8788fdc2 | 23 | struct be_queue_info *mccq = &adapter->mcc_obj.q; |
5fb379ee SP |
24 | u32 val = 0; |
25 | ||
26 | val |= mccq->id & DB_MCCQ_RING_ID_MASK; | |
27 | val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; | |
8788fdc2 | 28 | iowrite32(val, adapter->db + DB_MCCQ_OFFSET); |
5fb379ee SP |
29 | } |
30 | ||
31 | /* To check if valid bit is set, check the entire word as we don't know | |
32 | * the endianness of the data (old entry is host endian while a new entry is | |
33 | * little endian) */ | |
34 | static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl) | |
35 | { | |
36 | if (compl->flags != 0) { | |
37 | compl->flags = le32_to_cpu(compl->flags); | |
38 | BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); | |
39 | return true; | |
40 | } else { | |
41 | return false; | |
42 | } | |
43 | } | |
44 | ||
45 | /* Need to reset the entire word that houses the valid bit */ | |
46 | static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl) | |
47 | { | |
48 | compl->flags = 0; | |
49 | } | |
50 | ||
8788fdc2 | 51 | static int be_mcc_compl_process(struct be_adapter *adapter, |
5fb379ee SP |
52 | struct be_mcc_cq_entry *compl) |
53 | { | |
54 | u16 compl_status, extd_status; | |
55 | ||
56 | /* Just swap the status to host endian; mcc tag is opaquely copied | |
57 | * from mcc_wrb */ | |
58 | be_dws_le_to_cpu(compl, 4); | |
59 | ||
60 | compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & | |
61 | CQE_STATUS_COMPL_MASK; | |
62 | if (compl_status != MCC_STATUS_SUCCESS) { | |
63 | extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & | |
64 | CQE_STATUS_EXTD_MASK; | |
65 | printk(KERN_WARNING DRV_NAME | |
66 | " error in cmd completion: status(compl/extd)=%d/%d\n", | |
67 | compl_status, extd_status); | |
68 | return -1; | |
69 | } | |
70 | return 0; | |
71 | } | |
72 | ||
a8f447bd | 73 | /* Link state evt is a string of bytes; no need for endian swapping */ |
8788fdc2 | 74 | static void be_async_link_state_process(struct be_adapter *adapter, |
a8f447bd SP |
75 | struct be_async_event_link_state *evt) |
76 | { | |
8788fdc2 SP |
77 | be_link_status_update(adapter, |
78 | evt->port_link_status == ASYNC_EVENT_LINK_UP); | |
a8f447bd SP |
79 | } |
80 | ||
81 | static inline bool is_link_state_evt(u32 trailer) | |
82 | { | |
83 | return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & | |
84 | ASYNC_TRAILER_EVENT_CODE_MASK) == | |
85 | ASYNC_EVENT_CODE_LINK_STATE); | |
86 | } | |
5fb379ee | 87 | |
8788fdc2 | 88 | static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_adapter *adapter) |
5fb379ee | 89 | { |
8788fdc2 | 90 | struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; |
5fb379ee SP |
91 | struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq); |
92 | ||
93 | if (be_mcc_compl_is_new(compl)) { | |
94 | queue_tail_inc(mcc_cq); | |
95 | return compl; | |
96 | } | |
97 | return NULL; | |
98 | } | |
99 | ||
8788fdc2 | 100 | void be_process_mcc(struct be_adapter *adapter) |
5fb379ee SP |
101 | { |
102 | struct be_mcc_cq_entry *compl; | |
103 | int num = 0; | |
104 | ||
8788fdc2 SP |
105 | spin_lock_bh(&adapter->mcc_cq_lock); |
106 | while ((compl = be_mcc_compl_get(adapter))) { | |
a8f447bd SP |
107 | if (compl->flags & CQE_FLAGS_ASYNC_MASK) { |
108 | /* Interpret flags as an async trailer */ | |
109 | BUG_ON(!is_link_state_evt(compl->flags)); | |
110 | ||
111 | /* Interpret compl as a async link evt */ | |
8788fdc2 | 112 | be_async_link_state_process(adapter, |
a8f447bd SP |
113 | (struct be_async_event_link_state *) compl); |
114 | } else { | |
8788fdc2 SP |
115 | be_mcc_compl_process(adapter, compl); |
116 | atomic_dec(&adapter->mcc_obj.q.used); | |
5fb379ee SP |
117 | } |
118 | be_mcc_compl_use(compl); | |
119 | num++; | |
120 | } | |
121 | if (num) | |
8788fdc2 SP |
122 | be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num); |
123 | spin_unlock_bh(&adapter->mcc_cq_lock); | |
5fb379ee SP |
124 | } |
125 | ||
6ac7b687 | 126 | /* Wait till no more pending mcc requests are present */ |
8788fdc2 | 127 | static void be_mcc_wait_compl(struct be_adapter *adapter) |
6ac7b687 SP |
128 | { |
129 | #define mcc_timeout 50000 /* 5s timeout */ | |
130 | int i; | |
131 | for (i = 0; i < mcc_timeout; i++) { | |
8788fdc2 SP |
132 | be_process_mcc(adapter); |
133 | if (atomic_read(&adapter->mcc_obj.q.used) == 0) | |
6ac7b687 SP |
134 | break; |
135 | udelay(100); | |
136 | } | |
137 | if (i == mcc_timeout) | |
138 | printk(KERN_WARNING DRV_NAME "mcc poll timed out\n"); | |
139 | } | |
140 | ||
/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_adapter *adapter)
{
	/* kick the doorbell, then synchronously poll until the MCC q drains */
	be_mcc_notify(adapter);
	be_mcc_wait_compl(adapter);
}
147 | ||
6b7c5b94 SP |
148 | static int be_mbox_db_ready_wait(void __iomem *db) |
149 | { | |
150 | int cnt = 0, wait = 5; | |
151 | u32 ready; | |
152 | ||
153 | do { | |
154 | ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; | |
155 | if (ready) | |
156 | break; | |
157 | ||
158 | if (cnt > 200000) { | |
159 | printk(KERN_WARNING DRV_NAME | |
160 | ": mbox_db poll timed out\n"); | |
161 | return -1; | |
162 | } | |
163 | ||
164 | if (cnt > 50) | |
165 | wait = 200; | |
166 | cnt += wait; | |
167 | udelay(wait); | |
168 | } while (true); | |
169 | ||
170 | return 0; | |
171 | } | |
172 | ||
173 | /* | |
174 | * Insert the mailbox address into the doorbell in two steps | |
5fb379ee | 175 | * Polls on the mbox doorbell till a command completion (or a timeout) occurs |
6b7c5b94 | 176 | */ |
8788fdc2 | 177 | static int be_mbox_db_ring(struct be_adapter *adapter) |
6b7c5b94 SP |
178 | { |
179 | int status; | |
6b7c5b94 | 180 | u32 val = 0; |
8788fdc2 SP |
181 | void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; |
182 | struct be_dma_mem *mbox_mem = &adapter->mbox_mem; | |
6b7c5b94 SP |
183 | struct be_mcc_mailbox *mbox = mbox_mem->va; |
184 | struct be_mcc_cq_entry *cqe = &mbox->cqe; | |
185 | ||
186 | memset(cqe, 0, sizeof(*cqe)); | |
187 | ||
188 | val &= ~MPU_MAILBOX_DB_RDY_MASK; | |
189 | val |= MPU_MAILBOX_DB_HI_MASK; | |
190 | /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ | |
191 | val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; | |
192 | iowrite32(val, db); | |
193 | ||
194 | /* wait for ready to be set */ | |
195 | status = be_mbox_db_ready_wait(db); | |
196 | if (status != 0) | |
197 | return status; | |
198 | ||
199 | val = 0; | |
200 | val &= ~MPU_MAILBOX_DB_RDY_MASK; | |
201 | val &= ~MPU_MAILBOX_DB_HI_MASK; | |
202 | /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ | |
203 | val |= (u32)(mbox_mem->dma >> 4) << 2; | |
204 | iowrite32(val, db); | |
205 | ||
206 | status = be_mbox_db_ready_wait(db); | |
207 | if (status != 0) | |
208 | return status; | |
209 | ||
5fb379ee SP |
210 | /* A cq entry has been made now */ |
211 | if (be_mcc_compl_is_new(cqe)) { | |
8788fdc2 | 212 | status = be_mcc_compl_process(adapter, &mbox->cqe); |
5fb379ee SP |
213 | be_mcc_compl_use(cqe); |
214 | if (status) | |
215 | return status; | |
216 | } else { | |
217 | printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n"); | |
6b7c5b94 SP |
218 | return -1; |
219 | } | |
5fb379ee | 220 | return 0; |
6b7c5b94 SP |
221 | } |
222 | ||
8788fdc2 | 223 | static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) |
6b7c5b94 | 224 | { |
8788fdc2 | 225 | u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); |
6b7c5b94 SP |
226 | |
227 | *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; | |
228 | if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) | |
229 | return -1; | |
230 | else | |
231 | return 0; | |
232 | } | |
233 | ||
8788fdc2 | 234 | static int be_POST_stage_poll(struct be_adapter *adapter, u16 poll_stage) |
6b7c5b94 SP |
235 | { |
236 | u16 stage, cnt, error; | |
237 | for (cnt = 0; cnt < 5000; cnt++) { | |
8788fdc2 | 238 | error = be_POST_stage_get(adapter, &stage); |
6b7c5b94 SP |
239 | if (error) |
240 | return -1; | |
241 | ||
242 | if (stage == poll_stage) | |
243 | break; | |
244 | udelay(1000); | |
245 | } | |
246 | if (stage != poll_stage) | |
247 | return -1; | |
248 | return 0; | |
249 | } | |
250 | ||
251 | ||
8788fdc2 | 252 | int be_cmd_POST(struct be_adapter *adapter) |
6b7c5b94 SP |
253 | { |
254 | u16 stage, error; | |
255 | ||
8788fdc2 | 256 | error = be_POST_stage_get(adapter, &stage); |
6b7c5b94 SP |
257 | if (error) |
258 | goto err; | |
259 | ||
260 | if (stage == POST_STAGE_ARMFW_RDY) | |
261 | return 0; | |
262 | ||
263 | if (stage != POST_STAGE_AWAITING_HOST_RDY) | |
264 | goto err; | |
265 | ||
266 | /* On awaiting host rdy, reset and again poll on awaiting host rdy */ | |
8788fdc2 SP |
267 | iowrite32(POST_STAGE_BE_RESET, adapter->csr + MPU_EP_SEMAPHORE_OFFSET); |
268 | error = be_POST_stage_poll(adapter, POST_STAGE_AWAITING_HOST_RDY); | |
6b7c5b94 SP |
269 | if (error) |
270 | goto err; | |
271 | ||
272 | /* Now kickoff POST and poll on armfw ready */ | |
8788fdc2 SP |
273 | iowrite32(POST_STAGE_HOST_RDY, adapter->csr + MPU_EP_SEMAPHORE_OFFSET); |
274 | error = be_POST_stage_poll(adapter, POST_STAGE_ARMFW_RDY); | |
6b7c5b94 SP |
275 | if (error) |
276 | goto err; | |
277 | ||
278 | return 0; | |
279 | err: | |
280 | printk(KERN_WARNING DRV_NAME ": ERROR, stage=%d\n", stage); | |
281 | return -1; | |
282 | } | |
283 | ||
284 | static inline void *embedded_payload(struct be_mcc_wrb *wrb) | |
285 | { | |
286 | return wrb->payload.embedded_payload; | |
287 | } | |
288 | ||
289 | static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) | |
290 | { | |
291 | return &wrb->payload.sgl[0]; | |
292 | } | |
293 | ||
294 | /* Don't touch the hdr after it's prepared */ | |
295 | static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, | |
296 | bool embedded, u8 sge_cnt) | |
297 | { | |
298 | if (embedded) | |
299 | wrb->embedded |= MCC_WRB_EMBEDDED_MASK; | |
300 | else | |
301 | wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) << | |
302 | MCC_WRB_SGE_CNT_SHIFT; | |
303 | wrb->payload_length = payload_len; | |
304 | be_dws_cpu_to_le(wrb, 20); | |
305 | } | |
306 | ||
307 | /* Don't touch the hdr after it's prepared */ | |
308 | static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, | |
309 | u8 subsystem, u8 opcode, int cmd_len) | |
310 | { | |
311 | req_hdr->opcode = opcode; | |
312 | req_hdr->subsystem = subsystem; | |
313 | req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); | |
314 | } | |
315 | ||
316 | static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, | |
317 | struct be_dma_mem *mem) | |
318 | { | |
319 | int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); | |
320 | u64 dma = (u64)mem->dma; | |
321 | ||
322 | for (i = 0; i < buf_pages; i++) { | |
323 | pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); | |
324 | pages[i].hi = cpu_to_le32(upper_32_bits(dma)); | |
325 | dma += PAGE_SIZE_4K; | |
326 | } | |
327 | } | |
328 | ||
329 | /* Converts interrupt delay in microseconds to multiplier value */ | |
330 | static u32 eq_delay_to_mult(u32 usec_delay) | |
331 | { | |
332 | #define MAX_INTR_RATE 651042 | |
333 | const u32 round = 10; | |
334 | u32 multiplier; | |
335 | ||
336 | if (usec_delay == 0) | |
337 | multiplier = 0; | |
338 | else { | |
339 | u32 interrupt_rate = 1000000 / usec_delay; | |
340 | /* Max delay, corresponding to the lowest interrupt rate */ | |
341 | if (interrupt_rate == 0) | |
342 | multiplier = 1023; | |
343 | else { | |
344 | multiplier = (MAX_INTR_RATE - interrupt_rate) * round; | |
345 | multiplier /= interrupt_rate; | |
346 | /* Round the multiplier to the closest value.*/ | |
347 | multiplier = (multiplier + round/2) / round; | |
348 | multiplier = min(multiplier, (u32)1023); | |
349 | } | |
350 | } | |
351 | return multiplier; | |
352 | } | |
353 | ||
354 | static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem) | |
355 | { | |
356 | return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; | |
357 | } | |
358 | ||
5fb379ee SP |
359 | static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq) |
360 | { | |
361 | struct be_mcc_wrb *wrb = NULL; | |
362 | if (atomic_read(&mccq->used) < mccq->len) { | |
363 | wrb = queue_head_node(mccq); | |
364 | queue_head_inc(mccq); | |
365 | atomic_inc(&mccq->used); | |
366 | memset(wrb, 0, sizeof(*wrb)); | |
367 | } | |
368 | return wrb; | |
369 | } | |
370 | ||
8788fdc2 | 371 | int be_cmd_eq_create(struct be_adapter *adapter, |
6b7c5b94 SP |
372 | struct be_queue_info *eq, int eq_delay) |
373 | { | |
8788fdc2 | 374 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
375 | struct be_cmd_req_eq_create *req = embedded_payload(wrb); |
376 | struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); | |
377 | struct be_dma_mem *q_mem = &eq->dma_mem; | |
378 | int status; | |
379 | ||
8788fdc2 | 380 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
381 | memset(wrb, 0, sizeof(*wrb)); |
382 | ||
383 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
384 | ||
385 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
386 | OPCODE_COMMON_EQ_CREATE, sizeof(*req)); | |
387 | ||
388 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); | |
389 | ||
390 | AMAP_SET_BITS(struct amap_eq_context, func, req->context, | |
8788fdc2 | 391 | adapter->pci_func); |
6b7c5b94 SP |
392 | AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); |
393 | /* 4byte eqe*/ | |
394 | AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); | |
395 | AMAP_SET_BITS(struct amap_eq_context, count, req->context, | |
396 | __ilog2_u32(eq->len/256)); | |
397 | AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, | |
398 | eq_delay_to_mult(eq_delay)); | |
399 | be_dws_cpu_to_le(req->context, sizeof(req->context)); | |
400 | ||
401 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
402 | ||
8788fdc2 | 403 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
404 | if (!status) { |
405 | eq->id = le16_to_cpu(resp->eq_id); | |
406 | eq->created = true; | |
407 | } | |
8788fdc2 | 408 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
409 | return status; |
410 | } | |
411 | ||
8788fdc2 | 412 | int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, |
6b7c5b94 SP |
413 | u8 type, bool permanent, u32 if_handle) |
414 | { | |
8788fdc2 | 415 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
416 | struct be_cmd_req_mac_query *req = embedded_payload(wrb); |
417 | struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); | |
418 | int status; | |
419 | ||
8788fdc2 | 420 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
421 | memset(wrb, 0, sizeof(*wrb)); |
422 | ||
423 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
424 | ||
425 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
426 | OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req)); | |
427 | ||
428 | req->type = type; | |
429 | if (permanent) { | |
430 | req->permanent = 1; | |
431 | } else { | |
432 | req->if_id = cpu_to_le16((u16)if_handle); | |
433 | req->permanent = 0; | |
434 | } | |
435 | ||
8788fdc2 | 436 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
437 | if (!status) |
438 | memcpy(mac_addr, resp->mac.addr, ETH_ALEN); | |
439 | ||
8788fdc2 | 440 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
441 | return status; |
442 | } | |
443 | ||
8788fdc2 | 444 | int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, |
6b7c5b94 SP |
445 | u32 if_id, u32 *pmac_id) |
446 | { | |
8788fdc2 | 447 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
448 | struct be_cmd_req_pmac_add *req = embedded_payload(wrb); |
449 | int status; | |
450 | ||
8788fdc2 | 451 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
452 | memset(wrb, 0, sizeof(*wrb)); |
453 | ||
454 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
455 | ||
456 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
457 | OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req)); | |
458 | ||
459 | req->if_id = cpu_to_le32(if_id); | |
460 | memcpy(req->mac_address, mac_addr, ETH_ALEN); | |
461 | ||
8788fdc2 | 462 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
463 | if (!status) { |
464 | struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); | |
465 | *pmac_id = le32_to_cpu(resp->pmac_id); | |
466 | } | |
467 | ||
8788fdc2 | 468 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
469 | return status; |
470 | } | |
471 | ||
8788fdc2 | 472 | int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id) |
6b7c5b94 | 473 | { |
8788fdc2 | 474 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
475 | struct be_cmd_req_pmac_del *req = embedded_payload(wrb); |
476 | int status; | |
477 | ||
8788fdc2 | 478 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
479 | memset(wrb, 0, sizeof(*wrb)); |
480 | ||
481 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
482 | ||
483 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
484 | OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req)); | |
485 | ||
486 | req->if_id = cpu_to_le32(if_id); | |
487 | req->pmac_id = cpu_to_le32(pmac_id); | |
488 | ||
8788fdc2 SP |
489 | status = be_mbox_db_ring(adapter); |
490 | spin_unlock(&adapter->mbox_lock); | |
6b7c5b94 SP |
491 | |
492 | return status; | |
493 | } | |
494 | ||
8788fdc2 | 495 | int be_cmd_cq_create(struct be_adapter *adapter, |
6b7c5b94 SP |
496 | struct be_queue_info *cq, struct be_queue_info *eq, |
497 | bool sol_evts, bool no_delay, int coalesce_wm) | |
498 | { | |
8788fdc2 | 499 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
500 | struct be_cmd_req_cq_create *req = embedded_payload(wrb); |
501 | struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); | |
502 | struct be_dma_mem *q_mem = &cq->dma_mem; | |
503 | void *ctxt = &req->context; | |
504 | int status; | |
505 | ||
8788fdc2 | 506 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
507 | memset(wrb, 0, sizeof(*wrb)); |
508 | ||
509 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
510 | ||
511 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
512 | OPCODE_COMMON_CQ_CREATE, sizeof(*req)); | |
513 | ||
514 | req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); | |
515 | ||
516 | AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); | |
517 | AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); | |
518 | AMAP_SET_BITS(struct amap_cq_context, count, ctxt, | |
519 | __ilog2_u32(cq->len/256)); | |
520 | AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1); | |
521 | AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts); | |
522 | AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1); | |
523 | AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id); | |
5fb379ee | 524 | AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); |
8788fdc2 | 525 | AMAP_SET_BITS(struct amap_cq_context, func, ctxt, adapter->pci_func); |
6b7c5b94 SP |
526 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); |
527 | ||
528 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
529 | ||
8788fdc2 | 530 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
531 | if (!status) { |
532 | cq->id = le16_to_cpu(resp->cq_id); | |
533 | cq->created = true; | |
534 | } | |
8788fdc2 | 535 | spin_unlock(&adapter->mbox_lock); |
5fb379ee SP |
536 | |
537 | return status; | |
538 | } | |
539 | ||
540 | static u32 be_encoded_q_len(int q_len) | |
541 | { | |
542 | u32 len_encoded = fls(q_len); /* log2(len) + 1 */ | |
543 | if (len_encoded == 16) | |
544 | len_encoded = 0; | |
545 | return len_encoded; | |
546 | } | |
547 | ||
8788fdc2 | 548 | int be_cmd_mccq_create(struct be_adapter *adapter, |
5fb379ee SP |
549 | struct be_queue_info *mccq, |
550 | struct be_queue_info *cq) | |
551 | { | |
8788fdc2 | 552 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
5fb379ee SP |
553 | struct be_cmd_req_mcc_create *req = embedded_payload(wrb); |
554 | struct be_dma_mem *q_mem = &mccq->dma_mem; | |
555 | void *ctxt = &req->context; | |
556 | int status; | |
557 | ||
8788fdc2 | 558 | spin_lock(&adapter->mbox_lock); |
5fb379ee SP |
559 | memset(wrb, 0, sizeof(*wrb)); |
560 | ||
561 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
562 | ||
563 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
564 | OPCODE_COMMON_MCC_CREATE, sizeof(*req)); | |
565 | ||
566 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | |
567 | ||
8788fdc2 | 568 | AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, adapter->pci_func); |
5fb379ee SP |
569 | AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1); |
570 | AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt, | |
571 | be_encoded_q_len(mccq->len)); | |
572 | AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id); | |
573 | ||
574 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | |
575 | ||
576 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
577 | ||
8788fdc2 | 578 | status = be_mbox_db_ring(adapter); |
5fb379ee SP |
579 | if (!status) { |
580 | struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); | |
581 | mccq->id = le16_to_cpu(resp->id); | |
582 | mccq->created = true; | |
583 | } | |
8788fdc2 | 584 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
585 | |
586 | return status; | |
587 | } | |
588 | ||
8788fdc2 | 589 | int be_cmd_txq_create(struct be_adapter *adapter, |
6b7c5b94 SP |
590 | struct be_queue_info *txq, |
591 | struct be_queue_info *cq) | |
592 | { | |
8788fdc2 | 593 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
594 | struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb); |
595 | struct be_dma_mem *q_mem = &txq->dma_mem; | |
596 | void *ctxt = &req->context; | |
597 | int status; | |
598 | u32 len_encoded; | |
599 | ||
8788fdc2 | 600 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
601 | memset(wrb, 0, sizeof(*wrb)); |
602 | ||
603 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
604 | ||
605 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, | |
606 | sizeof(*req)); | |
607 | ||
608 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); | |
609 | req->ulp_num = BE_ULP1_NUM; | |
610 | req->type = BE_ETH_TX_RING_TYPE_STANDARD; | |
611 | ||
612 | len_encoded = fls(txq->len); /* log2(len) + 1 */ | |
613 | if (len_encoded == 16) | |
614 | len_encoded = 0; | |
615 | AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded); | |
616 | AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt, | |
8788fdc2 | 617 | adapter->pci_func); |
6b7c5b94 SP |
618 | AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); |
619 | AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); | |
620 | ||
621 | be_dws_cpu_to_le(ctxt, sizeof(req->context)); | |
622 | ||
623 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
624 | ||
8788fdc2 | 625 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
626 | if (!status) { |
627 | struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); | |
628 | txq->id = le16_to_cpu(resp->cid); | |
629 | txq->created = true; | |
630 | } | |
8788fdc2 | 631 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
632 | |
633 | return status; | |
634 | } | |
635 | ||
8788fdc2 | 636 | int be_cmd_rxq_create(struct be_adapter *adapter, |
6b7c5b94 SP |
637 | struct be_queue_info *rxq, u16 cq_id, u16 frag_size, |
638 | u16 max_frame_size, u32 if_id, u32 rss) | |
639 | { | |
8788fdc2 | 640 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
641 | struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb); |
642 | struct be_dma_mem *q_mem = &rxq->dma_mem; | |
643 | int status; | |
644 | ||
8788fdc2 | 645 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
646 | memset(wrb, 0, sizeof(*wrb)); |
647 | ||
648 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
649 | ||
650 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE, | |
651 | sizeof(*req)); | |
652 | ||
653 | req->cq_id = cpu_to_le16(cq_id); | |
654 | req->frag_size = fls(frag_size) - 1; | |
655 | req->num_pages = 2; | |
656 | be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); | |
657 | req->interface_id = cpu_to_le32(if_id); | |
658 | req->max_frame_size = cpu_to_le16(max_frame_size); | |
659 | req->rss_queue = cpu_to_le32(rss); | |
660 | ||
8788fdc2 | 661 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
662 | if (!status) { |
663 | struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); | |
664 | rxq->id = le16_to_cpu(resp->id); | |
665 | rxq->created = true; | |
666 | } | |
8788fdc2 | 667 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
668 | |
669 | return status; | |
670 | } | |
671 | ||
672 | /* Generic destroyer function for all types of queues */ | |
8788fdc2 | 673 | int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, |
6b7c5b94 SP |
674 | int queue_type) |
675 | { | |
8788fdc2 | 676 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
677 | struct be_cmd_req_q_destroy *req = embedded_payload(wrb); |
678 | u8 subsys = 0, opcode = 0; | |
679 | int status; | |
680 | ||
8788fdc2 | 681 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
682 | |
683 | memset(wrb, 0, sizeof(*wrb)); | |
684 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
685 | ||
686 | switch (queue_type) { | |
687 | case QTYPE_EQ: | |
688 | subsys = CMD_SUBSYSTEM_COMMON; | |
689 | opcode = OPCODE_COMMON_EQ_DESTROY; | |
690 | break; | |
691 | case QTYPE_CQ: | |
692 | subsys = CMD_SUBSYSTEM_COMMON; | |
693 | opcode = OPCODE_COMMON_CQ_DESTROY; | |
694 | break; | |
695 | case QTYPE_TXQ: | |
696 | subsys = CMD_SUBSYSTEM_ETH; | |
697 | opcode = OPCODE_ETH_TX_DESTROY; | |
698 | break; | |
699 | case QTYPE_RXQ: | |
700 | subsys = CMD_SUBSYSTEM_ETH; | |
701 | opcode = OPCODE_ETH_RX_DESTROY; | |
702 | break; | |
5fb379ee SP |
703 | case QTYPE_MCCQ: |
704 | subsys = CMD_SUBSYSTEM_COMMON; | |
705 | opcode = OPCODE_COMMON_MCC_DESTROY; | |
706 | break; | |
6b7c5b94 SP |
707 | default: |
708 | printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n"); | |
709 | status = -1; | |
710 | goto err; | |
711 | } | |
712 | be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); | |
713 | req->id = cpu_to_le16(q->id); | |
714 | ||
8788fdc2 | 715 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 716 | err: |
8788fdc2 | 717 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
718 | |
719 | return status; | |
720 | } | |
721 | ||
722 | /* Create an rx filtering policy configuration on an i/f */ | |
8788fdc2 | 723 | int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac, |
6b7c5b94 SP |
724 | bool pmac_invalid, u32 *if_handle, u32 *pmac_id) |
725 | { | |
8788fdc2 | 726 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
727 | struct be_cmd_req_if_create *req = embedded_payload(wrb); |
728 | int status; | |
729 | ||
8788fdc2 | 730 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
731 | memset(wrb, 0, sizeof(*wrb)); |
732 | ||
733 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
734 | ||
735 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
736 | OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); | |
737 | ||
738 | req->capability_flags = cpu_to_le32(flags); | |
739 | req->enable_flags = cpu_to_le32(flags); | |
740 | if (!pmac_invalid) | |
741 | memcpy(req->mac_addr, mac, ETH_ALEN); | |
742 | ||
8788fdc2 | 743 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
744 | if (!status) { |
745 | struct be_cmd_resp_if_create *resp = embedded_payload(wrb); | |
746 | *if_handle = le32_to_cpu(resp->interface_id); | |
747 | if (!pmac_invalid) | |
748 | *pmac_id = le32_to_cpu(resp->pmac_id); | |
749 | } | |
750 | ||
8788fdc2 | 751 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
752 | return status; |
753 | } | |
754 | ||
8788fdc2 | 755 | int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id) |
6b7c5b94 | 756 | { |
8788fdc2 | 757 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
758 | struct be_cmd_req_if_destroy *req = embedded_payload(wrb); |
759 | int status; | |
760 | ||
8788fdc2 | 761 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
762 | memset(wrb, 0, sizeof(*wrb)); |
763 | ||
764 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
765 | ||
766 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
767 | OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req)); | |
768 | ||
769 | req->interface_id = cpu_to_le32(interface_id); | |
8788fdc2 | 770 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 771 | |
8788fdc2 | 772 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
773 | |
774 | return status; | |
775 | } | |
776 | ||
777 | /* Get stats is a non embedded command: the request is not embedded inside | |
778 | * WRB but is a separate dma memory block | |
779 | */ | |
8788fdc2 | 780 | int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) |
6b7c5b94 | 781 | { |
8788fdc2 | 782 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
783 | struct be_cmd_req_get_stats *req = nonemb_cmd->va; |
784 | struct be_sge *sge = nonembedded_sgl(wrb); | |
785 | int status; | |
786 | ||
8788fdc2 | 787 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
788 | memset(wrb, 0, sizeof(*wrb)); |
789 | ||
790 | memset(req, 0, sizeof(*req)); | |
791 | ||
792 | be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); | |
793 | ||
794 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | |
795 | OPCODE_ETH_GET_STATISTICS, sizeof(*req)); | |
796 | sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); | |
797 | sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF); | |
798 | sge->len = cpu_to_le32(nonemb_cmd->size); | |
799 | ||
8788fdc2 | 800 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
801 | if (!status) { |
802 | struct be_cmd_resp_get_stats *resp = nonemb_cmd->va; | |
803 | be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats)); | |
804 | } | |
805 | ||
8788fdc2 | 806 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
807 | return status; |
808 | } | |
809 | ||
8788fdc2 | 810 | int be_cmd_link_status_query(struct be_adapter *adapter, |
a8f447bd | 811 | bool *link_up) |
6b7c5b94 | 812 | { |
8788fdc2 | 813 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
814 | struct be_cmd_req_link_status *req = embedded_payload(wrb); |
815 | int status; | |
816 | ||
8788fdc2 | 817 | spin_lock(&adapter->mbox_lock); |
a8f447bd SP |
818 | |
819 | *link_up = false; | |
6b7c5b94 SP |
820 | memset(wrb, 0, sizeof(*wrb)); |
821 | ||
822 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
823 | ||
824 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
825 | OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req)); | |
826 | ||
8788fdc2 | 827 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
828 | if (!status) { |
829 | struct be_cmd_resp_link_status *resp = embedded_payload(wrb); | |
a8f447bd SP |
830 | if (resp->mac_speed != PHY_LINK_SPEED_ZERO) |
831 | *link_up = true; | |
6b7c5b94 SP |
832 | } |
833 | ||
8788fdc2 | 834 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
835 | return status; |
836 | } | |
837 | ||
8788fdc2 | 838 | int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver) |
6b7c5b94 | 839 | { |
8788fdc2 | 840 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
841 | struct be_cmd_req_get_fw_version *req = embedded_payload(wrb); |
842 | int status; | |
843 | ||
8788fdc2 | 844 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
845 | memset(wrb, 0, sizeof(*wrb)); |
846 | ||
847 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
848 | ||
849 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
850 | OPCODE_COMMON_GET_FW_VERSION, sizeof(*req)); | |
851 | ||
8788fdc2 | 852 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
853 | if (!status) { |
854 | struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); | |
855 | strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); | |
856 | } | |
857 | ||
8788fdc2 | 858 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
859 | return status; |
860 | } | |
861 | ||
862 | /* set the EQ delay interval of an EQ to specified value */ | |
8788fdc2 | 863 | int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) |
6b7c5b94 | 864 | { |
8788fdc2 | 865 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
866 | struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb); |
867 | int status; | |
868 | ||
8788fdc2 | 869 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
870 | memset(wrb, 0, sizeof(*wrb)); |
871 | ||
872 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
873 | ||
874 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
875 | OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); | |
876 | ||
877 | req->num_eq = cpu_to_le32(1); | |
878 | req->delay[0].eq_id = cpu_to_le32(eq_id); | |
879 | req->delay[0].phase = 0; | |
880 | req->delay[0].delay_multiplier = cpu_to_le32(eqd); | |
881 | ||
8788fdc2 | 882 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 883 | |
8788fdc2 | 884 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
885 | return status; |
886 | } | |
887 | ||
8788fdc2 | 888 | int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, |
6b7c5b94 SP |
889 | u32 num, bool untagged, bool promiscuous) |
890 | { | |
8788fdc2 | 891 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
892 | struct be_cmd_req_vlan_config *req = embedded_payload(wrb); |
893 | int status; | |
894 | ||
8788fdc2 | 895 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
896 | memset(wrb, 0, sizeof(*wrb)); |
897 | ||
898 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
899 | ||
900 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
901 | OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req)); | |
902 | ||
903 | req->interface_id = if_id; | |
904 | req->promiscuous = promiscuous; | |
905 | req->untagged = untagged; | |
906 | req->num_vlan = num; | |
907 | if (!promiscuous) { | |
908 | memcpy(req->normal_vlan, vtag_array, | |
909 | req->num_vlan * sizeof(vtag_array[0])); | |
910 | } | |
911 | ||
8788fdc2 | 912 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 913 | |
8788fdc2 | 914 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
915 | return status; |
916 | } | |
917 | ||
6ac7b687 | 918 | /* Use MCC for this command as it may be called in BH context */ |
8788fdc2 | 919 | int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en) |
6b7c5b94 | 920 | { |
6ac7b687 SP |
921 | struct be_mcc_wrb *wrb; |
922 | struct be_cmd_req_promiscuous_config *req; | |
6b7c5b94 | 923 | |
8788fdc2 | 924 | spin_lock_bh(&adapter->mcc_lock); |
6ac7b687 | 925 | |
8788fdc2 | 926 | wrb = wrb_from_mcc(&adapter->mcc_obj.q); |
6ac7b687 SP |
927 | BUG_ON(!wrb); |
928 | ||
929 | req = embedded_payload(wrb); | |
6b7c5b94 SP |
930 | |
931 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
932 | ||
933 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, | |
934 | OPCODE_ETH_PROMISCUOUS, sizeof(*req)); | |
935 | ||
936 | if (port_num) | |
937 | req->port1_promiscuous = en; | |
938 | else | |
939 | req->port0_promiscuous = en; | |
940 | ||
8788fdc2 | 941 | be_mcc_notify_wait(adapter); |
6b7c5b94 | 942 | |
8788fdc2 | 943 | spin_unlock_bh(&adapter->mcc_lock); |
6ac7b687 | 944 | return 0; |
6b7c5b94 SP |
945 | } |
946 | ||
6ac7b687 SP |
947 | /* |
948 | * Use MCC for this command as it may be called in BH context | |
949 | * (mc == NULL) => multicast promiscous | |
950 | */ | |
8788fdc2 | 951 | int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, |
24307eef | 952 | struct dev_mc_list *mc_list, u32 mc_count) |
6b7c5b94 | 953 | { |
6ac7b687 SP |
954 | #define BE_MAX_MC 32 /* set mcast promisc if > 32 */ |
955 | struct be_mcc_wrb *wrb; | |
956 | struct be_cmd_req_mcast_mac_config *req; | |
6b7c5b94 | 957 | |
8788fdc2 | 958 | spin_lock_bh(&adapter->mcc_lock); |
6ac7b687 | 959 | |
8788fdc2 | 960 | wrb = wrb_from_mcc(&adapter->mcc_obj.q); |
6ac7b687 SP |
961 | BUG_ON(!wrb); |
962 | ||
963 | req = embedded_payload(wrb); | |
6b7c5b94 SP |
964 | |
965 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
966 | ||
967 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
968 | OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); | |
969 | ||
970 | req->interface_id = if_id; | |
24307eef SP |
971 | if (mc_list && mc_count <= BE_MAX_MC) { |
972 | int i; | |
973 | struct dev_mc_list *mc; | |
974 | ||
975 | req->num_mac = cpu_to_le16(mc_count); | |
976 | ||
977 | for (mc = mc_list, i = 0; mc; mc = mc->next, i++) | |
978 | memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); | |
979 | } else { | |
980 | req->promiscuous = 1; | |
6b7c5b94 SP |
981 | } |
982 | ||
8788fdc2 | 983 | be_mcc_notify_wait(adapter); |
6b7c5b94 | 984 | |
8788fdc2 | 985 | spin_unlock_bh(&adapter->mcc_lock); |
6ac7b687 SP |
986 | |
987 | return 0; | |
6b7c5b94 SP |
988 | } |
989 | ||
8788fdc2 | 990 | int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) |
6b7c5b94 | 991 | { |
8788fdc2 | 992 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
993 | struct be_cmd_req_set_flow_control *req = embedded_payload(wrb); |
994 | int status; | |
995 | ||
8788fdc2 | 996 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
997 | |
998 | memset(wrb, 0, sizeof(*wrb)); | |
999 | ||
1000 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
1001 | ||
1002 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1003 | OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req)); | |
1004 | ||
1005 | req->tx_flow_control = cpu_to_le16((u16)tx_fc); | |
1006 | req->rx_flow_control = cpu_to_le16((u16)rx_fc); | |
1007 | ||
8788fdc2 | 1008 | status = be_mbox_db_ring(adapter); |
6b7c5b94 | 1009 | |
8788fdc2 | 1010 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
1011 | return status; |
1012 | } | |
1013 | ||
8788fdc2 | 1014 | int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) |
6b7c5b94 | 1015 | { |
8788fdc2 | 1016 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
1017 | struct be_cmd_req_get_flow_control *req = embedded_payload(wrb); |
1018 | int status; | |
1019 | ||
8788fdc2 | 1020 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
1021 | |
1022 | memset(wrb, 0, sizeof(*wrb)); | |
1023 | ||
1024 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
1025 | ||
1026 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1027 | OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req)); | |
1028 | ||
8788fdc2 | 1029 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
1030 | if (!status) { |
1031 | struct be_cmd_resp_get_flow_control *resp = | |
1032 | embedded_payload(wrb); | |
1033 | *tx_fc = le16_to_cpu(resp->tx_flow_control); | |
1034 | *rx_fc = le16_to_cpu(resp->rx_flow_control); | |
1035 | } | |
1036 | ||
8788fdc2 | 1037 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
1038 | return status; |
1039 | } | |
1040 | ||
8788fdc2 | 1041 | int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num) |
6b7c5b94 | 1042 | { |
8788fdc2 | 1043 | struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem); |
6b7c5b94 SP |
1044 | struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb); |
1045 | int status; | |
1046 | ||
8788fdc2 | 1047 | spin_lock(&adapter->mbox_lock); |
6b7c5b94 SP |
1048 | |
1049 | memset(wrb, 0, sizeof(*wrb)); | |
1050 | ||
1051 | be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); | |
1052 | ||
1053 | be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, | |
1054 | OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); | |
1055 | ||
8788fdc2 | 1056 | status = be_mbox_db_ring(adapter); |
6b7c5b94 SP |
1057 | if (!status) { |
1058 | struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); | |
1059 | *port_num = le32_to_cpu(resp->phys_port); | |
1060 | } | |
1061 | ||
8788fdc2 | 1062 | spin_unlock(&adapter->mbox_lock); |
6b7c5b94 SP |
1063 | return status; |
1064 | } |