bnx2x: Multiple concurrent l2 traffic classes
drivers/net/bnx2x/bnx2x_sp.c
/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright 2011 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI	16

/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @o:		pointer to the object
 * @exe_len:	length of the execution chunk
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner	 = owner;

	/* Owner specific callbacks */
	o->validate = validate;
	o->optimize = optimize;
	o->execute  = exec;
	o->get      = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
			 "length of %d\n", exe_len);
}
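
/*
 * Usage sketch: this mirrors how the MAC object wires itself up later in
 * this file (see bnx2x_init_mac_obj()); each classification object plugs
 * its own callbacks into the shared execution queue machinery:
 *
 *	bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, 1, qable_obj,
 *			     bnx2x_validate_vlan_mac,
 *			     bnx2x_optimize_vlan_mac,
 *			     bnx2x_execute_vlan_mac,
 *			     bnx2x_exeq_get_mac);
 */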

static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}

/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @elem:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element out against a pending
		 * complementary command */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			BNX2X_ERR("Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}

static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}

/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * The next step should not be performed until the current one is
	 * finished, unless the DRV_CLEAR_ONLY bit is set. In that case we
	 * just want to properly clear object internals without sending any
	 * command to the FW, which also implies there won't be any
	 * completion to clear the 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
					 "resetting pending_comp\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent both lists from being empty while moving
			 * an element. This allows calling
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}

static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}

/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

/**
 * bnx2x_state_wait - wait until the given bit (state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @pstate:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}

/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		/* roll back the MAC credit on a partial failure */
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		/* take the MAC credit back on a partial failure */
		mp->get(mp, 1);
		return false;
	}

	return true;
}

/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}


/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}

/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}


static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}

/* LLH CAM line allocations */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
					bool add, unsigned char *dev_addr,
					int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
				       NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
	   (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
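
/*
 * Worked example of the wide-bus packing above (illustrative MAC address,
 * not taken from the original sources): for dev_addr = 00:11:22:33:44:55
 * the two 32-bit words written to the u64 LLH_FUNC_MEM entry are
 *
 *	wb_data[0] = 0x22334455;	(four least significant bytes)
 *	wb_data[1] = 0x00000011;	(two most significant bytes)
 */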

/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:	number of rules in this ramrod data
 *
 * currently we always configure one rule and the echo field to contain a CID
 * and an opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}
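
/*
 * Illustration with hypothetical values: for cid == 0x30 and
 * type == BNX2X_FILTER_MAC_PENDING the echo field packs both values, so
 * the completion handler can later recover which connection and which
 * command type a CQE belongs to:
 *
 *	hdr->echo = (0x30 & BNX2X_SWCID_MASK) |
 *		    (BNX2X_FILTER_MAC_PENDING << BNX2X_SWCID_SHIFT);
 */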


/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, the current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of the 8 per function MAC
	 * entries in the LLH register. There are also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
			 "Queue %d\n", (add ? "add" : "delete"),
	   BNX2X_MAC_PRN_LIST(mac), raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_MAC,
					&rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/*
	 * Set the ramrod data header.
	 * TODO: take this to the higher level in order to prevent multiple
	 * writing.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = 0xff;
	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
			 (add ? "setting" : "clearing"),
			 BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
}

/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}

static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
			 vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this VLAN to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_VLAN,
					&rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/*
	 * Set the ramrod data header.
	 * TODO: take this to the higher level in order to prevent multiple
	 * writing.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;


	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this pair to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					true, CLASSIFY_RULE_OPCODE_PAIR,
					&rule_entry->pair.header);

		/* Set the VLAN and MAC for the new rule */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/*
	 * Set the ramrod data header.
	 * TODO: take this to the higher level in order to prevent multiple
	 * writing.
	 */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}

/**
 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}

#define list_next_entry(pos, member) \
		list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure the next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure the next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is
 * taken into account
 *
 * pointer to the cookie - should be given back in the next call to make
 * the function handle the next element. If *ppos is set to NULL it will
 * restart the iterator. If the returned *ppos == NULL, the last element
 * has been handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p,
			   struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
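
/*
 * Typical restore loop (a sketch built from the cookie semantics documented
 * above, not code taken from the original file): the caller walks the
 * registry until *ppos comes back as NULL:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			return rc;
 *	} while (pos);
 */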

/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element matching specific criteria and NULL if such an
 * element hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
				 "current registry state\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->get_credit(o)))
		return -EINVAL;

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		queueable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return -EEXIST.
	 */
	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
				 "current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		queueable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
				 "current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or an ADD command for the destination object. Return
	 * an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source "
			  "queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the "
			  "destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	      src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}

static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}

/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}

/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:			device handle
 * @o:			bnx2x_vlan_mac_obj
 * @cqe:		completion element
 * @ramrod_flags:	if RAMROD_CONT is set, schedules the next execution
 *			chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}

/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the "
					  "optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from "
					  "the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
		   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}

/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:		device handle
 * @o:		vlan_mac object the element belongs to
 * @elem:	execution queue element describing the command
 * @restore:	true if this is a RESTORE flow
 * @re:		output - the prepared registry element
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	int cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This should never happen, because we have checked
			 * the CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
		       sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}

/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:			device handle
 * @qo:			queueable object the commands belong to
 * @exe_chunk:		chunk of commands to execute
 * @ramrod_flags:	execution flags
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	int cmd;

	/*
	 * If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/*
			 * We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/* Commit the data writes towards the memory */
		mb();

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}

static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}

/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:	device handle
 * @p:	command parameters
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
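
/*
 * Example caller flow (a sketch; 'mac_obj' and 'mac' are hypothetical
 * caller-side variables, only the structure, flag and command names come
 * from this driver): adding a MAC and waiting for its completion would
 * look roughly like
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */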


/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:			vlan_mac object to delete from
 * @vlan_mac_flags:	only elements with these flags are deleted
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * Returns zero if the last operation has completed successfully and there are
 * no more elements left, a positive value if the last operation has completed
 * successfully and there are more previously configured elements, and a
 * negative value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc = 0;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags)
			list_del(&exeq_pos->link);
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/*
	 * Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				return rc;
			}
		}
	}

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}
1796
1797static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1798 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1799 unsigned long *pstate, bnx2x_obj_type type)
1800{
1801 raw->func_id = func_id;
1802 raw->cid = cid;
1803 raw->cl_id = cl_id;
1804 raw->rdata = rdata;
1805 raw->rdata_mapping = rdata_mapping;
1806 raw->state = state;
1807 raw->pstate = pstate;
1808 raw->obj_type = type;
1809 raw->check_pending = bnx2x_raw_check_pending;
1810 raw->clear_pending = bnx2x_raw_clear_pending;
1811 raw->set_pending = bnx2x_raw_set_pending;
1812 raw->wait_comp = bnx2x_raw_wait;
1813}
1814
1815static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1816 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1817 int state, unsigned long *pstate, bnx2x_obj_type type,
1818 struct bnx2x_credit_pool_obj *macs_pool,
1819 struct bnx2x_credit_pool_obj *vlans_pool)
1820{
1821 INIT_LIST_HEAD(&o->head);
1822
1823 o->macs_pool = macs_pool;
1824 o->vlans_pool = vlans_pool;
1825
1826 o->delete_all = bnx2x_vlan_mac_del_all;
1827 o->restore = bnx2x_vlan_mac_restore;
1828 o->complete = bnx2x_complete_vlan_mac;
1829 o->wait = bnx2x_wait_vlan_mac;
1830
1831 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1832 state, pstate, type);
1833}
1834
1835
1836void bnx2x_init_mac_obj(struct bnx2x *bp,
1837 struct bnx2x_vlan_mac_obj *mac_obj,
1838 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1839 dma_addr_t rdata_mapping, int state,
1840 unsigned long *pstate, bnx2x_obj_type type,
1841 struct bnx2x_credit_pool_obj *macs_pool)
1842{
1843 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1844
1845 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1846 rdata_mapping, state, pstate, type,
1847 macs_pool, NULL);
1848
1849 /* CAM credit pool handling */
1850 mac_obj->get_credit = bnx2x_get_credit_mac;
1851 mac_obj->put_credit = bnx2x_put_credit_mac;
1852 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1853 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1854
1855 if (CHIP_IS_E1x(bp)) {
1856 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1857 mac_obj->check_del = bnx2x_check_mac_del;
1858 mac_obj->check_add = bnx2x_check_mac_add;
1859 mac_obj->check_move = bnx2x_check_move_always_err;
1860 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1861
1862 /* Exe Queue */
1863 bnx2x_exe_queue_init(bp,
1864 &mac_obj->exe_queue, 1, qable_obj,
1865 bnx2x_validate_vlan_mac,
1866 bnx2x_optimize_vlan_mac,
1867 bnx2x_execute_vlan_mac,
1868 bnx2x_exeq_get_mac);
1869 } else {
1870 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1871 mac_obj->check_del = bnx2x_check_mac_del;
1872 mac_obj->check_add = bnx2x_check_mac_add;
1873 mac_obj->check_move = bnx2x_check_move;
1874 mac_obj->ramrod_cmd =
1875 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1876
1877 /* Exe Queue */
1878 bnx2x_exe_queue_init(bp,
1879 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1880 qable_obj, bnx2x_validate_vlan_mac,
1881 bnx2x_optimize_vlan_mac,
1882 bnx2x_execute_vlan_mac,
1883 bnx2x_exeq_get_mac);
1884 }
1885}
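/*
 * Illustrative sketch (assumed values, not taken from the driver): a
 * typical initialization would pair the object with the function's
 * ramrod data buffer and the MACs credit pool, e.g. for a fastpath
 * queue:
 *
 *	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
 *			   BP_FUNC(bp), rdata, rdata_mapping,
 *			   BNX2X_FILTER_MAC_PENDING, &bp->sp_state,
 *			   BNX2X_OBJ_TYPE_RX_TX, &bp->macs_pool);
 */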
1886
1887void bnx2x_init_vlan_obj(struct bnx2x *bp,
1888 struct bnx2x_vlan_mac_obj *vlan_obj,
1889 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1890 dma_addr_t rdata_mapping, int state,
1891 unsigned long *pstate, bnx2x_obj_type type,
1892 struct bnx2x_credit_pool_obj *vlans_pool)
1893{
1894 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1895
1896 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1897 rdata_mapping, state, pstate, type, NULL,
1898 vlans_pool);
1899
1900 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1901 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1902 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1903 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1904
1905 if (CHIP_IS_E1x(bp)) {
1906 		BNX2X_ERR("Only E2 and newer chips are supported\n");
1907 BUG();
1908 } else {
1909 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1910 vlan_obj->check_del = bnx2x_check_vlan_del;
1911 vlan_obj->check_add = bnx2x_check_vlan_add;
1912 vlan_obj->check_move = bnx2x_check_move;
1913 vlan_obj->ramrod_cmd =
1914 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1915
1916 /* Exe Queue */
1917 bnx2x_exe_queue_init(bp,
1918 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1919 qable_obj, bnx2x_validate_vlan_mac,
1920 bnx2x_optimize_vlan_mac,
1921 bnx2x_execute_vlan_mac,
1922 bnx2x_exeq_get_vlan);
1923 }
1924}
1925
1926void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1927 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1928 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1929 dma_addr_t rdata_mapping, int state,
1930 unsigned long *pstate, bnx2x_obj_type type,
1931 struct bnx2x_credit_pool_obj *macs_pool,
1932 struct bnx2x_credit_pool_obj *vlans_pool)
1933{
1934 union bnx2x_qable_obj *qable_obj =
1935 (union bnx2x_qable_obj *)vlan_mac_obj;
1936
1937 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1938 rdata_mapping, state, pstate, type,
1939 macs_pool, vlans_pool);
1940
1941 /* CAM pool handling */
1942 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1943 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1944 /*
1945 * CAM offset is relevant for 57710 and 57711 chips only which have a
1946 * single CAM for both MACs and VLAN-MAC pairs. So the offset
1947 * will be taken from MACs' pool object only.
1948 */
1949 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1950 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1951
1952 if (CHIP_IS_E1(bp)) {
1953 		BNX2X_ERR("Only E1H and newer chips are supported\n");
1954 BUG();
1955 } else if (CHIP_IS_E1H(bp)) {
1956 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
1957 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1958 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1959 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
1960 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1961
1962 /* Exe Queue */
1963 bnx2x_exe_queue_init(bp,
1964 &vlan_mac_obj->exe_queue, 1, qable_obj,
1965 bnx2x_validate_vlan_mac,
1966 bnx2x_optimize_vlan_mac,
1967 bnx2x_execute_vlan_mac,
1968 bnx2x_exeq_get_vlan_mac);
1969 } else {
1970 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
1971 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1972 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1973 vlan_mac_obj->check_move = bnx2x_check_move;
1974 vlan_mac_obj->ramrod_cmd =
1975 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1976
1977 /* Exe Queue */
1978 bnx2x_exe_queue_init(bp,
1979 &vlan_mac_obj->exe_queue,
1980 CLASSIFY_RULES_COUNT,
1981 qable_obj, bnx2x_validate_vlan_mac,
1982 bnx2x_optimize_vlan_mac,
1983 bnx2x_execute_vlan_mac,
1984 bnx2x_exeq_get_vlan_mac);
1985 }
1986
1987}
1988
1989/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1990static inline void __storm_memset_mac_filters(struct bnx2x *bp,
1991 struct tstorm_eth_mac_filter_config *mac_filters,
1992 u16 pf_id)
1993{
1994 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1995
1996 u32 addr = BAR_TSTRORM_INTMEM +
1997 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1998
1999 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2000}
2001
2002static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2003 struct bnx2x_rx_mode_ramrod_params *p)
2004{
2005 /* update the bp MAC filter structure */
2006 u32 mask = (1 << p->cl_id);
2007
2008 struct tstorm_eth_mac_filter_config *mac_filters =
2009 (struct tstorm_eth_mac_filter_config *)p->rdata;
2010
2011 	/* the initial setting is drop-all */
2012 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2013 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2014 u8 unmatched_unicast = 0;
2015
2016 	/* In E1x we only take the Rx accept flags into account, since Tx
2017 	 * switching isn't enabled. */
2018 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2019 /* accept matched ucast */
2020 drop_all_ucast = 0;
2021
2022 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2023 /* accept matched mcast */
2024 drop_all_mcast = 0;
2025
2026 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2027 		/* accept all ucast */
2028 drop_all_ucast = 0;
2029 accp_all_ucast = 1;
2030 }
2031 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2032 /* accept all mcast */
2033 drop_all_mcast = 0;
2034 accp_all_mcast = 1;
2035 }
2036 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2037 /* accept (all) bcast */
2038 accp_all_bcast = 1;
2039 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2040 /* accept unmatched unicasts */
2041 unmatched_unicast = 1;
2042
2043 mac_filters->ucast_drop_all = drop_all_ucast ?
2044 mac_filters->ucast_drop_all | mask :
2045 mac_filters->ucast_drop_all & ~mask;
2046
2047 mac_filters->mcast_drop_all = drop_all_mcast ?
2048 mac_filters->mcast_drop_all | mask :
2049 mac_filters->mcast_drop_all & ~mask;
2050
2051 mac_filters->ucast_accept_all = accp_all_ucast ?
2052 mac_filters->ucast_accept_all | mask :
2053 mac_filters->ucast_accept_all & ~mask;
2054
2055 mac_filters->mcast_accept_all = accp_all_mcast ?
2056 mac_filters->mcast_accept_all | mask :
2057 mac_filters->mcast_accept_all & ~mask;
2058
2059 mac_filters->bcast_accept_all = accp_all_bcast ?
2060 mac_filters->bcast_accept_all | mask :
2061 mac_filters->bcast_accept_all & ~mask;
2062
2063 mac_filters->unmatched_unicast = unmatched_unicast ?
2064 mac_filters->unmatched_unicast | mask :
2065 mac_filters->unmatched_unicast & ~mask;
2066
2067 	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2068 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2069 mac_filters->ucast_drop_all,
2070 mac_filters->mcast_drop_all,
2071 mac_filters->ucast_accept_all,
2072 mac_filters->mcast_accept_all,
2073 mac_filters->bcast_accept_all);
2074
2075 /* write the MAC filter structure*/
2076 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2077
2078 /* The operation is completed */
2079 clear_bit(p->state, p->pstate);
2080 smp_mb__after_clear_bit();
2081
2082 return 0;
2083}
2084
2085/* Setup ramrod data */
2086static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2087 struct eth_classify_header *hdr,
2088 u8 rule_cnt)
2089{
2090 hdr->echo = cid;
2091 hdr->rule_cnt = rule_cnt;
2092}
2093
2094static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2095 unsigned long accept_flags,
2096 struct eth_filter_rules_cmd *cmd,
2097 bool clear_accept_all)
2098{
2099 u16 state;
2100
2101 /* start with 'drop-all' */
2102 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2103 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2104
2105 if (accept_flags) {
2106 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2107 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2108
2109 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2110 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2111
2112 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2113 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2114 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2115 }
2116
2117 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2118 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2119 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2120 }
2121 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2122 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2123
2124 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2125 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2126 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2127 }
2128 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2129 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2130 }
2131
2132 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2133 if (clear_accept_all) {
2134 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2135 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2136 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2137 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2138 }
2139
2140 cmd->state = cpu_to_le16(state);
2141
2142}
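/*
 * Worked example (illustrative): a "normal" Rx filtering mode that sets
 * BNX2X_ACCEPT_UNICAST, BNX2X_ACCEPT_MULTICAST and BNX2X_ACCEPT_BROADCAST
 * starts from the drop-all state, has both DROP_ALL bits cleared by the
 * first two tests, and picks up only BCAST_ACCEPT_ALL:
 *
 *	struct eth_filter_rules_cmd rule = { 0 };
 *	unsigned long accept = 0;
 *
 *	__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
 *	__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
 *	__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
 *	bnx2x_rx_mode_set_cmd_state_e2(bp, accept, &rule, false);
 *	.. rule.state == cpu_to_le16(ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL)
 */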
2143
2144static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2145 struct bnx2x_rx_mode_ramrod_params *p)
2146{
2147 struct eth_filter_rules_ramrod_data *data = p->rdata;
2148 int rc;
2149 u8 rule_idx = 0;
2150
2151 /* Reset the ramrod data buffer */
2152 memset(data, 0, sizeof(*data));
2153
2154 /* Setup ramrod data */
2155
2156 /* Tx (internal switching) */
2157 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2158 data->rules[rule_idx].client_id = p->cl_id;
2159 data->rules[rule_idx].func_id = p->func_id;
2160
2161 data->rules[rule_idx].cmd_general_data =
2162 ETH_FILTER_RULES_CMD_TX_CMD;
2163
2164 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2165 &(data->rules[rule_idx++]), false);
2166 }
2167
2168 /* Rx */
2169 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2170 data->rules[rule_idx].client_id = p->cl_id;
2171 data->rules[rule_idx].func_id = p->func_id;
2172
2173 data->rules[rule_idx].cmd_general_data =
2174 ETH_FILTER_RULES_CMD_RX_CMD;
2175
2176 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2177 &(data->rules[rule_idx++]), false);
2178 }
2179
2180
2181 /*
2182 	 * If FCoE Queue configuration has been requested, configure the Rx and
2183 	 * internal switching modes for this queue in separate rules.
2184 	 *
2185 	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2186 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2187 */
2188 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2189 /* Tx (internal switching) */
2190 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2191 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2192 data->rules[rule_idx].func_id = p->func_id;
2193
2194 data->rules[rule_idx].cmd_general_data =
2195 ETH_FILTER_RULES_CMD_TX_CMD;
2196
2197 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2198 &(data->rules[rule_idx++]),
2199 true);
2200 }
2201
2202 /* Rx */
2203 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2204 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2205 data->rules[rule_idx].func_id = p->func_id;
2206
2207 data->rules[rule_idx].cmd_general_data =
2208 ETH_FILTER_RULES_CMD_RX_CMD;
2209
2210 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2211 &(data->rules[rule_idx++]),
2212 true);
2213 }
2214 }
2215
2216 /*
2217 * Set the ramrod header (most importantly - number of rules to
2218 * configure).
2219 */
2220 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2221
2222 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2223 "tx_accept_flags 0x%lx\n",
2224 data->header.rule_cnt, p->rx_accept_flags,
2225 p->tx_accept_flags);
2226
2227 /* Commit writes towards the memory before sending a ramrod */
2228 mb();
2229
2230 /* Send a ramrod */
2231 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2232 U64_HI(p->rdata_mapping),
2233 U64_LO(p->rdata_mapping),
2234 ETH_CONNECTION_TYPE);
2235 if (rc)
2236 return rc;
2237
2238 /* Ramrod completion is pending */
2239 return 1;
2240}
2241
2242static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2243 struct bnx2x_rx_mode_ramrod_params *p)
2244{
2245 return bnx2x_state_wait(bp, p->state, p->pstate);
2246}
2247
2248static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2249 struct bnx2x_rx_mode_ramrod_params *p)
2250{
2251 /* Do nothing */
2252 return 0;
2253}
2254
2255int bnx2x_config_rx_mode(struct bnx2x *bp,
2256 struct bnx2x_rx_mode_ramrod_params *p)
2257{
2258 int rc;
2259
2260 /* Configure the new classification in the chip */
2261 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2262 if (rc < 0)
2263 return rc;
2264
2265 /* Wait for a ramrod completion if was requested */
2266 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2267 rc = p->rx_mode_obj->wait_comp(bp, p);
2268 if (rc)
2269 return rc;
2270 }
2271
2272 return rc;
2273}
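/*
 * Illustrative sketch (hypothetical caller): requesting both directions
 * and blocking until the ramrod completes. The params structure is
 * assumed to already have its object, cl_id/cid and rdata fields set up.
 *
 *	__set_bit(RAMROD_RX, &p.ramrod_flags);
 *	__set_bit(RAMROD_TX, &p.ramrod_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_rx_mode(bp, &p);
 */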
2274
2275void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2276 struct bnx2x_rx_mode_obj *o)
2277{
2278 if (CHIP_IS_E1x(bp)) {
2279 o->wait_comp = bnx2x_empty_rx_mode_wait;
2280 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2281 } else {
2282 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2283 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2284 }
2285}
2286
2287/********************* Multicast verbs: SET, CLEAR ****************************/
2288static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2289{
2290 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2291}
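/*
 * The bin is simply the top byte of the little-endian CRC32C of the
 * 6-byte MAC, i.e. every MAC maps to one of 256 bins. For illustration:
 *
 *	u8 mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	u8 bin = bnx2x_mcast_bin_from_mac(mac);	.. some value in 0..255
 */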
2292
2293struct bnx2x_mcast_mac_elem {
2294 struct list_head link;
2295 u8 mac[ETH_ALEN];
2296 u8 pad[2]; /* For a natural alignment of the following buffer */
2297};
2298
2299struct bnx2x_pending_mcast_cmd {
2300 struct list_head link;
2301 int type; /* BNX2X_MCAST_CMD_X */
2302 union {
2303 struct list_head macs_head;
2304 u32 macs_num; /* Needed for DEL command */
2305 int next_bin; /* Needed for RESTORE flow with aprox match */
2306 } data;
2307
2308 	bool done; /* set to true when the command has been handled; in
2309 		    * practice used in 57712 handling only, where one pending
2310 		    * command may be handled in a few operations. Since for
2311 		    * other chips every operation is completed in a
2312 		    * single ramrod, there is no need to use this field.
2313 */
2314};
2315
2316static int bnx2x_mcast_wait(struct bnx2x *bp,
2317 struct bnx2x_mcast_obj *o)
2318{
2319 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2320 o->raw.wait_comp(bp, &o->raw))
2321 return -EBUSY;
2322
2323 return 0;
2324}
2325
2326static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2327 struct bnx2x_mcast_obj *o,
2328 struct bnx2x_mcast_ramrod_params *p,
2329 int cmd)
2330{
2331 int total_sz;
2332 struct bnx2x_pending_mcast_cmd *new_cmd;
2333 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2334 struct bnx2x_mcast_list_elem *pos;
2335 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2336 p->mcast_list_len : 0);
2337
2338 /* If the command is empty ("handle pending commands only"), break */
2339 if (!p->mcast_list_len)
2340 return 0;
2341
2342 total_sz = sizeof(*new_cmd) +
2343 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2344
2345 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2346 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2347
2348 if (!new_cmd)
2349 return -ENOMEM;
2350
2351 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2352 "macs_list_len=%d\n", cmd, macs_list_len);
2353
2354 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2355
2356 new_cmd->type = cmd;
2357 new_cmd->done = false;
2358
2359 switch (cmd) {
2360 case BNX2X_MCAST_CMD_ADD:
2361 cur_mac = (struct bnx2x_mcast_mac_elem *)
2362 ((u8 *)new_cmd + sizeof(*new_cmd));
2363
2364 		/* Push the MACs of the current command into the pending
2365 		 * command's MACs list: FIFO
2366 */
2367 list_for_each_entry(pos, &p->mcast_list, link) {
2368 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2369 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2370 cur_mac++;
2371 }
2372
2373 break;
2374
2375 case BNX2X_MCAST_CMD_DEL:
2376 new_cmd->data.macs_num = p->mcast_list_len;
2377 break;
2378
2379 case BNX2X_MCAST_CMD_RESTORE:
2380 new_cmd->data.next_bin = 0;
2381 break;
2382
2383 default:
2384 BNX2X_ERR("Unknown command: %d\n", cmd);
2385 return -EINVAL;
2386 }
2387
2388 /* Push the new pending command to the tail of the pending list: FIFO */
2389 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2390
2391 o->set_sched(o);
2392
2393 return 1;
2394}
2395
2396/**
2397 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2398 *
2399 * @o:
2400 * @last: index to start looking from (including)
2401 *
2402 * returns the next found (set) bin or a negative value if none is found.
2403 */
2404static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2405{
2406 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2407
2408 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2409 if (o->registry.aprox_match.vec[i])
2410 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2411 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2412 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2413 vec, cur_bit)) {
2414 return cur_bit;
2415 }
2416 }
2417 inner_start = 0;
2418 }
2419
2420 /* None found */
2421 return -1;
2422}
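/*
 * Typical iteration over all set bins (this is exactly how the restore
 * flows below use it):
 *
 *	for (bin = bnx2x_mcast_get_next_bin(o, 0); bin >= 0;
 *	     bin = bnx2x_mcast_get_next_bin(o, bin + 1))
 *		.. handle bin ..
 */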
2423
2424/**
2425 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2426 *
2427 * @o:
2428 *
2429 * returns the index of the found bin or -1 if none is found
2430 */
2431static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2432{
2433 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2434
2435 if (cur_bit >= 0)
2436 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2437
2438 return cur_bit;
2439}
2440
2441static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2442{
2443 struct bnx2x_raw_obj *raw = &o->raw;
2444 u8 rx_tx_flag = 0;
2445
2446 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2447 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2448 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2449
2450 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2451 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2452 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2453
2454 return rx_tx_flag;
2455}
2456
2457static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2458 struct bnx2x_mcast_obj *o, int idx,
2459 union bnx2x_mcast_config_data *cfg_data,
2460 int cmd)
2461{
2462 struct bnx2x_raw_obj *r = &o->raw;
2463 struct eth_multicast_rules_ramrod_data *data =
2464 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2465 u8 func_id = r->func_id;
2466 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2467 int bin;
2468
2469 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2470 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2471
2472 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2473
2474 /* Get a bin and update a bins' vector */
2475 switch (cmd) {
2476 case BNX2X_MCAST_CMD_ADD:
2477 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2478 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2479 break;
2480
2481 case BNX2X_MCAST_CMD_DEL:
2482 /* If there were no more bins to clear
2483 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2484 * clear any (0xff) bin.
2485 		 * See bnx2x_mcast_validate_e2() for an explanation of when it may
2486 * happen.
2487 */
2488 bin = bnx2x_mcast_clear_first_bin(o);
2489 break;
2490
2491 case BNX2X_MCAST_CMD_RESTORE:
2492 bin = cfg_data->bin;
2493 break;
2494
2495 default:
2496 BNX2X_ERR("Unknown command: %d\n", cmd);
2497 return;
2498 }
2499
2500 DP(BNX2X_MSG_SP, "%s bin %d\n",
2501 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2502 "Setting" : "Clearing"), bin);
2503
2504 data->rules[idx].bin_id = (u8)bin;
2505 data->rules[idx].func_id = func_id;
2506 data->rules[idx].engine_id = o->engine_id;
2507}
2508
2509/**
2510 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2511 *
2512 * @bp: device handle
2513 * @o:
2514 * @start_bin: index in the registry to start from (including)
2515 * @rdata_idx: index in the ramrod data to start from
2516 *
2517 * returns last handled bin index or -1 if all bins have been handled
2518 */
2519static inline int bnx2x_mcast_handle_restore_cmd_e2(
2520 	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
2521 int *rdata_idx)
2522{
2523 int cur_bin, cnt = *rdata_idx;
2524 union bnx2x_mcast_config_data cfg_data = {0};
2525
2526 /* go through the registry and configure the bins from it */
2527 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2528 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2529
2530 cfg_data.bin = (u8)cur_bin;
2531 o->set_one_rule(bp, o, cnt, &cfg_data,
2532 BNX2X_MCAST_CMD_RESTORE);
2533
2534 cnt++;
2535
2536 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2537
2538 /* Break if we reached the maximum number
2539 * of rules.
2540 */
2541 if (cnt >= o->max_cmd_len)
2542 break;
2543 }
2544
2545 *rdata_idx = cnt;
2546
2547 return cur_bin;
2548}
2549
2550static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2551 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2552 int *line_idx)
2553{
2554 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2555 int cnt = *line_idx;
2556 union bnx2x_mcast_config_data cfg_data = {0};
2557
2558 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2559 link) {
2560
2561 cfg_data.mac = &pmac_pos->mac[0];
2562 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2563
2564 cnt++;
2565
2566 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2567 " mcast MAC\n",
2568 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
2569
2570 list_del(&pmac_pos->link);
2571
2572 /* Break if we reached the maximum number
2573 * of rules.
2574 */
2575 if (cnt >= o->max_cmd_len)
2576 break;
2577 }
2578
2579 *line_idx = cnt;
2580
2581 /* if no more MACs to configure - we are done */
2582 if (list_empty(&cmd_pos->data.macs_head))
2583 cmd_pos->done = true;
2584}
2585
2586static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2587 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2588 int *line_idx)
2589{
2590 int cnt = *line_idx;
2591
2592 while (cmd_pos->data.macs_num) {
2593 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2594
2595 cnt++;
2596
2597 cmd_pos->data.macs_num--;
2598
2599 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2600 cmd_pos->data.macs_num, cnt);
2601
2602 /* Break if we reached the maximum
2603 * number of rules.
2604 */
2605 if (cnt >= o->max_cmd_len)
2606 break;
2607 }
2608
2609 *line_idx = cnt;
2610
2611 /* If we cleared all bins - we are done */
2612 if (!cmd_pos->data.macs_num)
2613 cmd_pos->done = true;
2614}
2615
2616static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2617 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2618 int *line_idx)
2619{
2620 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2621 line_idx);
2622
2623 if (cmd_pos->data.next_bin < 0)
2624 /* If o->set_restore returned -1 we are done */
2625 cmd_pos->done = true;
2626 else
2627 /* Start from the next bin next time */
2628 cmd_pos->data.next_bin++;
2629}
2630
2631static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2632 struct bnx2x_mcast_ramrod_params *p)
2633{
2634 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2635 int cnt = 0;
2636 struct bnx2x_mcast_obj *o = p->mcast_obj;
2637
2638 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2639 link) {
2640 switch (cmd_pos->type) {
2641 case BNX2X_MCAST_CMD_ADD:
2642 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2643 break;
2644
2645 case BNX2X_MCAST_CMD_DEL:
2646 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2647 break;
2648
2649 case BNX2X_MCAST_CMD_RESTORE:
2650 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2651 &cnt);
2652 break;
2653
2654 default:
2655 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2656 return -EINVAL;
2657 }
2658
2659 /* If the command has been completed - remove it from the list
2660 * and free the memory
2661 */
2662 if (cmd_pos->done) {
2663 list_del(&cmd_pos->link);
2664 kfree(cmd_pos);
2665 }
2666
2667 /* Break if we reached the maximum number of rules */
2668 if (cnt >= o->max_cmd_len)
2669 break;
2670 }
2671
2672 return cnt;
2673}
2674
2675static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2676 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2677 int *line_idx)
2678{
2679 struct bnx2x_mcast_list_elem *mlist_pos;
2680 union bnx2x_mcast_config_data cfg_data = {0};
2681 int cnt = *line_idx;
2682
2683 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2684 cfg_data.mac = mlist_pos->mac;
2685 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2686
2687 cnt++;
2688
2689 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2690 " mcast MAC\n",
2691 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
2692 }
2693
2694 *line_idx = cnt;
2695}
2696
2697static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2698 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2699 int *line_idx)
2700{
2701 int cnt = *line_idx, i;
2702
2703 for (i = 0; i < p->mcast_list_len; i++) {
2704 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2705
2706 cnt++;
2707
2708 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2709 p->mcast_list_len - i - 1);
2710 }
2711
2712 *line_idx = cnt;
2713}
2714
2715/**
2716 * bnx2x_mcast_handle_current_cmd -
2717 *
2718 * @bp: device handle
2719 * @p:
2720 * @cmd:
2721 * @start_cnt: first line in the ramrod data that may be used
2722 *
2723  * This function is called iff there is enough room for the current command in
2724 * the ramrod data.
2725 * Returns number of lines filled in the ramrod data in total.
2726 */
2727static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2728 struct bnx2x_mcast_ramrod_params *p, int cmd,
2729 int start_cnt)
2730{
2731 struct bnx2x_mcast_obj *o = p->mcast_obj;
2732 int cnt = start_cnt;
2733
2734 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2735
2736 switch (cmd) {
2737 case BNX2X_MCAST_CMD_ADD:
2738 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2739 break;
2740
2741 case BNX2X_MCAST_CMD_DEL:
2742 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2743 break;
2744
2745 case BNX2X_MCAST_CMD_RESTORE:
2746 o->hdl_restore(bp, o, 0, &cnt);
2747 break;
2748
2749 default:
2750 BNX2X_ERR("Unknown command: %d\n", cmd);
2751 return -EINVAL;
2752 }
2753
2754 /* The current command has been handled */
2755 p->mcast_list_len = 0;
2756
2757 return cnt;
2758}
2759
2760static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2761 struct bnx2x_mcast_ramrod_params *p,
2762 int cmd)
2763{
2764 struct bnx2x_mcast_obj *o = p->mcast_obj;
2765 int reg_sz = o->get_registry_size(o);
2766
2767 switch (cmd) {
2768 /* DEL command deletes all currently configured MACs */
2769 case BNX2X_MCAST_CMD_DEL:
2770 o->set_registry_size(o, 0);
2771 /* Don't break */
2772
2773 /* RESTORE command will restore the entire multicast configuration */
2774 case BNX2X_MCAST_CMD_RESTORE:
2775 		/* Here we set the approximate amount of work to do, which may
2776 		 * in fact be less: some MACs in postponed ADD command(s)
2777 		 * scheduled before this command may fall into the same bin,
2778 		 * so the actual number of bins set in the registry would be
2779 		 * less than we estimate here. See
2780 		 * bnx2x_mcast_set_one_rule_e2() for further details.
2781 */
2782 p->mcast_list_len = reg_sz;
2783 break;
2784
2785 case BNX2X_MCAST_CMD_ADD:
2786 case BNX2X_MCAST_CMD_CONT:
2787 /* Here we assume that all new MACs will fall into new bins.
2788 * However we will correct the real registry size after we
2789 * handle all pending commands.
2790 */
2791 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2792 break;
2793
2794 default:
2795 BNX2X_ERR("Unknown command: %d\n", cmd);
2796 return -EINVAL;
2797
2798 }
2799
2800 /* Increase the total number of MACs pending to be configured */
2801 o->total_pending_num += p->mcast_list_len;
2802
2803 return 0;
2804}
2805
2806static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2807 struct bnx2x_mcast_ramrod_params *p,
2808 int old_num_bins)
2809{
2810 struct bnx2x_mcast_obj *o = p->mcast_obj;
2811
2812 o->set_registry_size(o, old_num_bins);
2813 o->total_pending_num -= p->mcast_list_len;
2814}
2815
2816/**
2817 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2818 *
2819 * @bp: device handle
2820 * @p:
2821 * @len: number of rules to handle
2822 */
2823static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2824 struct bnx2x_mcast_ramrod_params *p,
2825 u8 len)
2826{
2827 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2828 struct eth_multicast_rules_ramrod_data *data =
2829 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2830
2831 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2832 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2833 data->header.rule_cnt = len;
2834}
2835
2836/**
2837 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2838 *
2839 * @bp: device handle
2840 * @o:
2841 *
2842  * Recalculate the actual number of set bins in the registry using Brian
2843  * Kernighan's algorithm: its running time is proportional to the number of set bins.
2844 *
2845 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
2846 */
2847static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2848 struct bnx2x_mcast_obj *o)
2849{
2850 int i, cnt = 0;
2851 u64 elem;
2852
2853 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2854 elem = o->registry.aprox_match.vec[i];
2855 for (; elem; cnt++)
2856 elem &= elem - 1;
2857 }
2858
2859 o->set_registry_size(o, cnt);
2860
2861 return 0;
2862}
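/*
 * The loop above is Kernighan's bit-counting trick: "elem &= elem - 1"
 * clears the lowest set bit on every iteration. E.g. for elem = 0b101100
 * it walks through 0b101000 and 0b100000 to 0 in exactly three steps,
 * one per set bit.
 */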
2863
2864static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2865 struct bnx2x_mcast_ramrod_params *p,
2866 int cmd)
2867{
2868 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2869 struct bnx2x_mcast_obj *o = p->mcast_obj;
2870 struct eth_multicast_rules_ramrod_data *data =
2871 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2872 int cnt = 0, rc;
2873
2874 /* Reset the ramrod data buffer */
2875 memset(data, 0, sizeof(*data));
2876
2877 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2878
2879 /* If there are no more pending commands - clear SCHEDULED state */
2880 if (list_empty(&o->pending_cmds_head))
2881 o->clear_sched(o);
2882
2883 /* The below may be true iff there was enough room in ramrod
2884 * data for all pending commands and for the current
2885 * command. Otherwise the current command would have been added
2886 * to the pending commands and p->mcast_list_len would have been
2887 * zeroed.
2888 */
2889 if (p->mcast_list_len > 0)
2890 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2891
2892 /* We've pulled out some MACs - update the total number of
2893 * outstanding.
2894 */
2895 o->total_pending_num -= cnt;
2896
2897 /* send a ramrod */
2898 WARN_ON(o->total_pending_num < 0);
2899 WARN_ON(cnt > o->max_cmd_len);
2900
2901 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2902
2903 /* Update a registry size if there are no more pending operations.
2904 *
2905 * We don't want to change the value of the registry size if there are
2906 * pending operations because we want it to always be equal to the
2907 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2908 * set bins after the last requested operation in order to properly
2909 * evaluate the size of the next DEL/RESTORE operation.
2910 *
2911 * Note that we update the registry itself during command(s) handling
2912 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2913 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2914 * with a limited amount of update commands (per MAC/bin) and we don't
2915 * know in this scope what the actual state of bins configuration is
2916 * going to be after this ramrod.
2917 */
2918 if (!o->total_pending_num)
2919 bnx2x_mcast_refresh_registry_e2(bp, o);
2920
2921 /* Commit writes towards the memory before sending a ramrod */
2922 mb();
2923
2924 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2925 * RAMROD_PENDING status immediately.
2926 */
2927 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2928 raw->clear_pending(raw);
2929 return 0;
2930 } else {
2931 /* Send a ramrod */
2932 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2933 raw->cid, U64_HI(raw->rdata_mapping),
2934 U64_LO(raw->rdata_mapping),
2935 ETH_CONNECTION_TYPE);
2936 if (rc)
2937 return rc;
2938
2939 /* Ramrod completion is pending */
2940 return 1;
2941 }
2942}
2943
2944static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
2945 struct bnx2x_mcast_ramrod_params *p,
2946 int cmd)
2947{
2948 /* Mark, that there is a work to do */
2949 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2950 p->mcast_list_len = 1;
2951
2952 return 0;
2953}
2954
2955static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
2956 struct bnx2x_mcast_ramrod_params *p,
2957 int old_num_bins)
2958{
2959 /* Do nothing */
2960}
2961
2962#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
2963do { \
2964 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2965} while (0)
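/*
 * For illustration: the 57711 filter is an array of 32-bit words
 * (MC_HASH_SIZE of them, see below), so bin 77 lands in word
 * 77 >> 5 == 2 at bit 77 & 0x1f == 13, i.e. the macro expands to
 * filter[2] |= (1 << 13).
 */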
2966
2967static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
2968 struct bnx2x_mcast_obj *o,
2969 struct bnx2x_mcast_ramrod_params *p,
2970 u32 *mc_filter)
2971{
2972 struct bnx2x_mcast_list_elem *mlist_pos;
2973 int bit;
2974
2975 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2976 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
2977 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
2978
2979 DP(BNX2X_MSG_SP, "About to configure "
2980 BNX2X_MAC_FMT" mcast MAC, bin %d\n",
2981 BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
2982
2983 /* bookkeeping... */
2984 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
2985 bit);
2986 }
2987}
2988
2989static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
2990 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2991 u32 *mc_filter)
2992{
2993 int bit;
2994
2995 for (bit = bnx2x_mcast_get_next_bin(o, 0);
2996 bit >= 0;
2997 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
2998 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
2999 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3000 }
3001}
3002
3003 /* On 57711 we write the multicast MACs' approximate match
3004  * table directly into the TSTORM's internal RAM, so we don't
3005  * need any tricks to make it work.
3006 */
3007static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3008 struct bnx2x_mcast_ramrod_params *p,
3009 int cmd)
3010{
3011 int i;
3012 struct bnx2x_mcast_obj *o = p->mcast_obj;
3013 struct bnx2x_raw_obj *r = &o->raw;
3014
3015 /* If CLEAR_ONLY has been requested - clear the registry
3016 * and clear a pending bit.
3017 */
3018 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3019 u32 mc_filter[MC_HASH_SIZE] = {0};
3020
3021 /* Set the multicast filter bits before writing it into
3022 * the internal memory.
3023 */
3024 switch (cmd) {
3025 case BNX2X_MCAST_CMD_ADD:
3026 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3027 break;
3028
3029 case BNX2X_MCAST_CMD_DEL:
3030 DP(BNX2X_MSG_SP, "Invalidating multicast "
3031 "MACs configuration\n");
3032
3033 /* clear the registry */
3034 memset(o->registry.aprox_match.vec, 0,
3035 sizeof(o->registry.aprox_match.vec));
3036 break;
3037
3038 case BNX2X_MCAST_CMD_RESTORE:
3039 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3040 break;
3041
3042 default:
3043 BNX2X_ERR("Unknown command: %d\n", cmd);
3044 return -EINVAL;
3045 }
3046
3047 /* Set the mcast filter in the internal memory */
3048 for (i = 0; i < MC_HASH_SIZE; i++)
3049 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3050 } else
3051 /* clear the registry */
3052 memset(o->registry.aprox_match.vec, 0,
3053 sizeof(o->registry.aprox_match.vec));
3054
3055 /* We are done */
3056 r->clear_pending(r);
3057
3058 return 0;
3059}
3060
3061static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3062 struct bnx2x_mcast_ramrod_params *p,
3063 int cmd)
3064{
3065 struct bnx2x_mcast_obj *o = p->mcast_obj;
3066 int reg_sz = o->get_registry_size(o);
3067
3068 switch (cmd) {
3069 /* DEL command deletes all currently configured MACs */
3070 case BNX2X_MCAST_CMD_DEL:
3071 o->set_registry_size(o, 0);
3072 /* Don't break */
3073
3074 /* RESTORE command will restore the entire multicast configuration */
3075 case BNX2X_MCAST_CMD_RESTORE:
3076 p->mcast_list_len = reg_sz;
3077 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3078 cmd, p->mcast_list_len);
3079 break;
3080
3081 case BNX2X_MCAST_CMD_ADD:
3082 case BNX2X_MCAST_CMD_CONT:
3083 /* Multicast MACs on 57710 are configured as unicast MACs and
3084 * there is only a limited number of CAM entries for that
3085 * matter.
3086 */
3087 if (p->mcast_list_len > o->max_cmd_len) {
3088 			BNX2X_ERR("Can't configure more than %d multicast MACs "
3089 				   "on 57710\n", o->max_cmd_len);
3090 return -EINVAL;
3091 }
3092 /* Every configured MAC should be cleared if DEL command is
3093 		 * called. Only the last ADD command is relevant, since every
3094 		 * ADD command overrides the previous configuration.
3095 */
3096 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3097 if (p->mcast_list_len > 0)
3098 o->set_registry_size(o, p->mcast_list_len);
3099
3100 break;
3101
3102 default:
3103 BNX2X_ERR("Unknown command: %d\n", cmd);
3104 return -EINVAL;
3105
3106 }
3107
3108 /* We want to ensure that commands are executed one by one for 57710.
3109 	 * Therefore each non-empty command will consume o->max_cmd_len.
3110 */
3111 if (p->mcast_list_len)
3112 o->total_pending_num += o->max_cmd_len;
3113
3114 return 0;
3115}
3116
3117static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3118 struct bnx2x_mcast_ramrod_params *p,
3119 int old_num_macs)
3120{
3121 struct bnx2x_mcast_obj *o = p->mcast_obj;
3122
3123 o->set_registry_size(o, old_num_macs);
3124
3125 	/* If the current command hasn't been handled yet, reaching this
3126 	 * point means it's meant to be dropped and we have to update
3127 	 * the number of outstanding MACs accordingly.
3128 */
3129 if (p->mcast_list_len)
3130 o->total_pending_num -= o->max_cmd_len;
3131}
3132
3133static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3134 struct bnx2x_mcast_obj *o, int idx,
3135 union bnx2x_mcast_config_data *cfg_data,
3136 int cmd)
3137{
3138 struct bnx2x_raw_obj *r = &o->raw;
3139 struct mac_configuration_cmd *data =
3140 (struct mac_configuration_cmd *)(r->rdata);
3141
3142 /* copy mac */
3143 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3144 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3145 &data->config_table[idx].middle_mac_addr,
3146 &data->config_table[idx].lsb_mac_addr,
3147 cfg_data->mac);
3148
3149 data->config_table[idx].vlan_id = 0;
3150 data->config_table[idx].pf_id = r->func_id;
3151 data->config_table[idx].clients_bit_vector =
3152 cpu_to_le32(1 << r->cl_id);
3153
3154 SET_FLAG(data->config_table[idx].flags,
3155 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3156 T_ETH_MAC_COMMAND_SET);
3157 }
3158}
3159
3160/**
3161 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3162 *
3163 * @bp: device handle
3164 * @p:
3165 * @len: number of rules to handle
3166 */
3167static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3168 struct bnx2x_mcast_ramrod_params *p,
3169 u8 len)
3170{
3171 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3172 struct mac_configuration_cmd *data =
3173 (struct mac_configuration_cmd *)(r->rdata);
3174
3175 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3176 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3177 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3178
3179 data->hdr.offset = offset;
3180 data->hdr.client_id = 0xff;
3181 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3182 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3183 data->hdr.length = len;
3184}
3185
3186/**
3187 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3188 *
3189 * @bp: device handle
3190 * @o:
3191 * @start_idx: index in the registry to start from
3192 * @rdata_idx: index in the ramrod data to start from
3193 *
3194  * The restore command for 57710 is, like all other commands, always a
3195  * stand-alone command; start_idx and rdata_idx will always be 0. This function will always
3196 * succeed.
3197 * returns -1 to comply with 57712 variant.
3198 */
3199static inline int bnx2x_mcast_handle_restore_cmd_e1(
3200 	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3201 int *rdata_idx)
3202{
3203 struct bnx2x_mcast_mac_elem *elem;
3204 int i = 0;
3205 union bnx2x_mcast_config_data cfg_data = {0};
3206
3207 /* go through the registry and configure the MACs from it. */
3208 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3209 cfg_data.mac = &elem->mac[0];
3210 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3211
3212 i++;
3213
3214 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3215 " mcast MAC\n",
3216 BNX2X_MAC_PRN_LIST(cfg_data.mac));
3217 }
3218
3219 *rdata_idx = i;
3220
3221 return -1;
3222}
3223
3224
3225static inline int bnx2x_mcast_handle_pending_cmds_e1(
3226 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3227{
3228 struct bnx2x_pending_mcast_cmd *cmd_pos;
3229 struct bnx2x_mcast_mac_elem *pmac_pos;
3230 struct bnx2x_mcast_obj *o = p->mcast_obj;
3231 union bnx2x_mcast_config_data cfg_data = {0};
3232 int cnt = 0;
3233
3234
3235 /* If nothing to be done - return */
3236 if (list_empty(&o->pending_cmds_head))
3237 return 0;
3238
3239 /* Handle the first command */
3240 cmd_pos = list_first_entry(&o->pending_cmds_head,
3241 struct bnx2x_pending_mcast_cmd, link);
3242
3243 switch (cmd_pos->type) {
3244 case BNX2X_MCAST_CMD_ADD:
3245 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3246 cfg_data.mac = &pmac_pos->mac[0];
3247 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3248
3249 cnt++;
3250
3251 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3252 " mcast MAC\n",
3253 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
3254 }
3255 break;
3256
3257 case BNX2X_MCAST_CMD_DEL:
3258 cnt = cmd_pos->data.macs_num;
3259 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3260 break;
3261
3262 case BNX2X_MCAST_CMD_RESTORE:
3263 o->hdl_restore(bp, o, 0, &cnt);
3264 break;
3265
3266 default:
3267 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3268 return -EINVAL;
3269 }
3270
3271 list_del(&cmd_pos->link);
3272 kfree(cmd_pos);
3273
3274 return cnt;
3275}
3276
3277/**
3278 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3279 *
3280 * @fw_hi:
3281 * @fw_mid:
3282 * @fw_lo:
3283 * @mac:
3284 */
3285static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3286 __le16 *fw_lo, u8 *mac)
3287{
3288 mac[1] = ((u8 *)fw_hi)[0];
3289 mac[0] = ((u8 *)fw_hi)[1];
3290 mac[3] = ((u8 *)fw_mid)[0];
3291 mac[2] = ((u8 *)fw_mid)[1];
3292 mac[5] = ((u8 *)fw_lo)[0];
3293 mac[4] = ((u8 *)fw_lo)[1];
3294}
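/*
 * This is the exact inverse of bnx2x_set_fw_mac_addr(), so a round trip
 * preserves the address (illustrative, variable names assumed):
 *
 *	bnx2x_set_fw_mac_addr(&fw_hi, &fw_mid, &fw_lo, mac);
 *	bnx2x_get_fw_mac_addr(&fw_hi, &fw_mid, &fw_lo, out);
 *	.. out[] now equals mac[] byte for byte
 */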
3295
3296/**
3297 * bnx2x_mcast_refresh_registry_e1 -
3298 *
3299 * @bp: device handle
3300 * @cnt:
3301 *
3302  * Check the first entry's flag in the ramrod data to see if it was a DELETE or ADD
3303  * command and update the registry correspondingly: if ADD - allocate memory and add
3304 * the entries to the registry (list), if DELETE - clear the registry and free
3305 * the memory.
3306 */
3307static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3308 struct bnx2x_mcast_obj *o)
3309{
3310 struct bnx2x_raw_obj *raw = &o->raw;
3311 struct bnx2x_mcast_mac_elem *elem;
3312 struct mac_configuration_cmd *data =
3313 (struct mac_configuration_cmd *)(raw->rdata);
3314
3315 /* If first entry contains a SET bit - the command was ADD,
3316 * otherwise - DEL_ALL
3317 */
3318 if (GET_FLAG(data->config_table[0].flags,
3319 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3320 int i, len = data->hdr.length;
3321
3322 /* Break if it was a RESTORE command */
3323 if (!list_empty(&o->registry.exact_match.macs))
3324 return 0;
3325
3326 elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
3327 if (!elem) {
3328 BNX2X_ERR("Failed to allocate registry memory\n");
3329 return -ENOMEM;
3330 }
3331
3332 for (i = 0; i < len; i++, elem++) {
3333 bnx2x_get_fw_mac_addr(
3334 &data->config_table[i].msb_mac_addr,
3335 &data->config_table[i].middle_mac_addr,
3336 &data->config_table[i].lsb_mac_addr,
3337 elem->mac);
3338 DP(BNX2X_MSG_SP, "Adding registry entry for ["
3339 BNX2X_MAC_FMT"]\n",
3340 BNX2X_MAC_PRN_LIST(elem->mac));
3341 list_add_tail(&elem->link,
3342 &o->registry.exact_match.macs);
3343 }
3344 } else {
3345 elem = list_first_entry(&o->registry.exact_match.macs,
3346 struct bnx2x_mcast_mac_elem, link);
3347 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3348 kfree(elem);
3349 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3350 }
3351
3352 return 0;
3353}
3354
3355static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3356 struct bnx2x_mcast_ramrod_params *p,
3357 int cmd)
3358{
3359 struct bnx2x_mcast_obj *o = p->mcast_obj;
3360 struct bnx2x_raw_obj *raw = &o->raw;
3361 struct mac_configuration_cmd *data =
3362 (struct mac_configuration_cmd *)(raw->rdata);
3363 int cnt = 0, i, rc;
3364
3365 /* Reset the ramrod data buffer */
3366 memset(data, 0, sizeof(*data));
3367
3368 /* First set all entries as invalid */
3369 for (i = 0; i < o->max_cmd_len ; i++)
3370 SET_FLAG(data->config_table[i].flags,
3371 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3372 T_ETH_MAC_COMMAND_INVALIDATE);
3373
3374 /* Handle pending commands first */
3375 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3376
3377 /* If there are no more pending commands - clear SCHEDULED state */
3378 if (list_empty(&o->pending_cmds_head))
3379 o->clear_sched(o);
3380
3381 /* The below may be true iff there were no pending commands */
3382 if (!cnt)
3383 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3384
3385 /* For 57710 every command has o->max_cmd_len length to ensure that
3386 * commands are done one at a time.
3387 */
3388 o->total_pending_num -= o->max_cmd_len;
3389
3390 /* send a ramrod */
3391
3392 WARN_ON(cnt > o->max_cmd_len);
3393
3394 /* Set ramrod header (in particular, a number of entries to update) */
3395 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3396
3397 /* update a registry: we need the registry contents to be always up
3398 * to date in order to be able to execute a RESTORE opcode. Here
3399 * we use the fact that for 57710 we sent one command at a time
3400 * hence we may take the registry update out of the command handling
3401 * and do it in a simpler way here.
3402 */
3403 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3404 if (rc)
3405 return rc;
3406
3407 /* Commit writes towards the memory before sending a ramrod */
3408 mb();
3409
3410 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3411 * RAMROD_PENDING status immediately.
3412 */
3413 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3414 raw->clear_pending(raw);
3415 return 0;
3416 } else {
3417 /* Send a ramrod */
3418 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3419 U64_HI(raw->rdata_mapping),
3420 U64_LO(raw->rdata_mapping),
3421 ETH_CONNECTION_TYPE);
3422 if (rc)
3423 return rc;
3424
3425 /* Ramrod completion is pending */
3426 return 1;
3427 }
3428
3429}
3430
3431static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3432{
3433 return o->registry.exact_match.num_macs_set;
3434}
3435
3436static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3437{
3438 return o->registry.aprox_match.num_bins_set;
3439}
3440
3441static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3442 int n)
3443{
3444 o->registry.exact_match.num_macs_set = n;
3445}
3446
3447static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3448 int n)
3449{
3450 o->registry.aprox_match.num_bins_set = n;
3451}
3452
3453int bnx2x_config_mcast(struct bnx2x *bp,
3454 struct bnx2x_mcast_ramrod_params *p,
3455 int cmd)
3456{
3457 struct bnx2x_mcast_obj *o = p->mcast_obj;
3458 struct bnx2x_raw_obj *r = &o->raw;
3459 int rc = 0, old_reg_size;
3460
3461 /* This is needed to recover number of currently configured mcast macs
3462 * in case of failure.
3463 */
3464 old_reg_size = o->get_registry_size(o);
3465
3466 /* Do some calculations and checks */
3467 rc = o->validate(bp, p, cmd);
3468 if (rc)
3469 return rc;
3470
3471 /* Return if there is no work to do */
3472 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3473 return 0;
3474
3475 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3476 "o->max_cmd_len=%d\n", o->total_pending_num,
3477 p->mcast_list_len, o->max_cmd_len);
3478
3479 /* Enqueue the current command to the pending list if we can't complete
3480 * it in the current iteration
3481 */
3482 if (r->check_pending(r) ||
3483 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3484 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3485 if (rc < 0)
3486 goto error_exit1;
3487
3488 /* As long as the current command is in a command list we
3489 * don't need to handle it separately.
3490 */
3491 p->mcast_list_len = 0;
3492 }
3493
3494 if (!r->check_pending(r)) {
3495
3496 /* Set 'pending' state */
3497 r->set_pending(r);
3498
3499 /* Configure the new classification in the chip */
3500 rc = o->config_mcast(bp, p, cmd);
3501 if (rc < 0)
3502 goto error_exit2;
3503
3504 /* Wait for a ramrod completion if was requested */
3505 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3506 rc = o->wait_comp(bp, o);
3507 }
3508
3509 return rc;
3510
3511error_exit2:
3512 r->clear_pending(r);
3513
3514error_exit1:
3515 o->revert(bp, p, old_reg_size);
3516
3517 return rc;
3518}
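/*
 * Illustrative sketch (hypothetical caller): programming a new multicast
 * list and waiting for the completion. p.mcast_list is assumed to
 * already hold the bnx2x_mcast_list_elem entries to add.
 *
 *	p.mcast_obj = &bp->mcast_obj;
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_mcast(bp, &p, BNX2X_MCAST_CMD_ADD);
 */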
3519
3520static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3521{
3522 smp_mb__before_clear_bit();
3523 clear_bit(o->sched_state, o->raw.pstate);
3524 smp_mb__after_clear_bit();
3525}
3526
3527static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3528{
3529 smp_mb__before_clear_bit();
3530 set_bit(o->sched_state, o->raw.pstate);
3531 smp_mb__after_clear_bit();
3532}
3533
3534static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3535{
3536 return !!test_bit(o->sched_state, o->raw.pstate);
3537}
3538
3539static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3540{
3541 return o->raw.check_pending(&o->raw) || o->check_sched(o);
3542}
3543
3544void bnx2x_init_mcast_obj(struct bnx2x *bp,
3545 struct bnx2x_mcast_obj *mcast_obj,
3546 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3547 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3548 int state, unsigned long *pstate, bnx2x_obj_type type)
3549{
3550 memset(mcast_obj, 0, sizeof(*mcast_obj));
3551
3552 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3553 rdata, rdata_mapping, state, pstate, type);
3554
3555 mcast_obj->engine_id = engine_id;
3556
3557 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3558
3559 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3560 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3561 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3562 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3563
3564 if (CHIP_IS_E1(bp)) {
3565 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3566 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3567 mcast_obj->hdl_restore =
3568 bnx2x_mcast_handle_restore_cmd_e1;
3569 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3570
3571 if (CHIP_REV_IS_SLOW(bp))
3572 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3573 else
3574 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3575
3576 mcast_obj->wait_comp = bnx2x_mcast_wait;
3577 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3578 mcast_obj->validate = bnx2x_mcast_validate_e1;
3579 mcast_obj->revert = bnx2x_mcast_revert_e1;
3580 mcast_obj->get_registry_size =
3581 bnx2x_mcast_get_registry_size_exact;
3582 mcast_obj->set_registry_size =
3583 bnx2x_mcast_set_registry_size_exact;
3584
3585 /* 57710 is the only chip that uses the exact match for mcast
3586 * at the moment.
3587 */
3588 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3589
3590 } else if (CHIP_IS_E1H(bp)) {
3591 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
3592 mcast_obj->enqueue_cmd = NULL;
3593 mcast_obj->hdl_restore = NULL;
3594 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3595
3596 /* 57711 doesn't send a ramrod, so it has unlimited credit
3597 * for one command.
3598 */
3599 mcast_obj->max_cmd_len = -1;
3600 mcast_obj->wait_comp = bnx2x_mcast_wait;
3601 mcast_obj->set_one_rule = NULL;
3602 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3603 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3604 mcast_obj->get_registry_size =
3605 bnx2x_mcast_get_registry_size_aprox;
3606 mcast_obj->set_registry_size =
3607 bnx2x_mcast_set_registry_size_aprox;
3608 } else {
3609 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3610 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3611 mcast_obj->hdl_restore =
3612 bnx2x_mcast_handle_restore_cmd_e2;
3613 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3614 /* TODO: There should be a proper HSI define for this number!!!
3615 */
3616 mcast_obj->max_cmd_len = 16;
3617 mcast_obj->wait_comp = bnx2x_mcast_wait;
3618 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3619 mcast_obj->validate = bnx2x_mcast_validate_e2;
3620 mcast_obj->revert = bnx2x_mcast_revert_e2;
3621 mcast_obj->get_registry_size =
3622 bnx2x_mcast_get_registry_size_aprox;
3623 mcast_obj->set_registry_size =
3624 bnx2x_mcast_set_registry_size_aprox;
3625 }
3626}
3627
3628/*************************** Credit handling **********************************/
3629
3630/**
3631  * __atomic_add_ifless - add if the result is less than a given value.
3632 *
3633 * @v: pointer of type atomic_t
3634 * @a: the amount to add to v...
3635 * @u: ...if (v + a) is less than u.
3636 *
3637 * returns true if (v + a) was less than u, and false otherwise.
3638 *
3639 */
3640static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3641{
3642 int c, old;
3643
3644 c = atomic_read(v);
3645 for (;;) {
3646 if (unlikely(c + a >= u))
3647 return false;
3648
3649 old = atomic_cmpxchg((v), c, c + a);
3650 if (likely(old == c))
3651 break;
3652 c = old;
3653 }
3654
3655 return true;
3656}
3657
3658/**
3659  * __atomic_dec_ifmoe - decrement if the result is greater than or equal to a given value.
3660  *
3661  * @v: pointer of type atomic_t
3662  * @a: the amount to dec from v...
3663  * @u: ...if (v - a) is greater than or equal to u.
3664  *
3665  * returns true if (v - a) was greater than or equal to u, and false
3666 * otherwise.
3667 */
3668static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3669{
3670 int c, old;
3671
3672 c = atomic_read(v);
3673 for (;;) {
3674 if (unlikely(c - a < u))
3675 return false;
3676
3677 old = atomic_cmpxchg((v), c, c - a);
3678 if (likely(old == c))
3679 break;
3680 c = old;
3681 }
3682
3683 return true;
3684}
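/*
 * Both helpers above are classic lock-free read-modify-write loops: read
 * the counter, compute the guarded result and retry atomic_cmpxchg()
 * until no other CPU raced in between. For example, taking one credit
 * while never letting the counter drop below zero:
 *
 *	if (__atomic_dec_ifmoe(&o->credit, 1, 0))
 *		.. credit taken ..
 *	else
 *		.. pool exhausted, counter left untouched ..
 */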
3685
3686static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3687{
3688 bool rc;
3689
3690 smp_mb();
3691 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3692 smp_mb();
3693
3694 return rc;
3695}
3696
3697static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3698{
3699 bool rc;
3700
3701 smp_mb();
3702
3703 	/* Don't allow a refill if credit + cnt > pool_sz */
3704 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3705
3706 smp_mb();
3707
3708 return rc;
3709}
3710
3711static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3712{
3713 int cur_credit;
3714
3715 smp_mb();
3716 cur_credit = atomic_read(&o->credit);
3717
3718 return cur_credit;
3719}
3720
3721static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3722 int cnt)
3723{
3724 return true;
3725}
3726
3727
3728static bool bnx2x_credit_pool_get_entry(
3729 struct bnx2x_credit_pool_obj *o,
3730 int *offset)
3731{
3732 int idx, vec, i;
3733
3734 *offset = -1;
3735
3736 /* Find "internal cam-offset" then add to base for this object... */
3737 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3738
3739 /* Skip the current vector if there are no free entries in it */
3740 if (!o->pool_mirror[vec])
3741 continue;
3742
3743 /* If we've got here we are going to find a free entry */
3744 		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3745 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3746
3747 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3748 /* Got one!! */
3749 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3750 *offset = o->base_pool_offset + idx;
3751 return true;
3752 }
3753 }
3754
3755 return false;
3756}
3757
3758static bool bnx2x_credit_pool_put_entry(
3759 struct bnx2x_credit_pool_obj *o,
3760 int offset)
3761{
3762 if (offset < o->base_pool_offset)
3763 return false;
3764
3765 offset -= o->base_pool_offset;
3766
3767 if (offset >= o->pool_sz)
3768 return false;
3769
3770 /* Return the entry to the pool */
3771 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3772
3773 return true;
3774}
3775
3776static bool bnx2x_credit_pool_put_entry_always_true(
3777 struct bnx2x_credit_pool_obj *o,
3778 int offset)
3779{
3780 return true;
3781}
3782
3783static bool bnx2x_credit_pool_get_entry_always_true(
3784 struct bnx2x_credit_pool_obj *o,
3785 int *offset)
3786{
3787 *offset = -1;
3788 return true;
3789}
3790/**
3791 * bnx2x_init_credit_pool - initialize credit pool internals.
3792 *
3793 * @p: credit pool object to initialize
3794 * @base: Base entry in the CAM to use.
3795 * @credit: pool size.
3796 *
3797 * If base is negative, no CAM entry handling will be performed.
3798 * If credit is negative, pool operations will always succeed (unlimited pool).
3799 *
3800 */
3801static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3802 int base, int credit)
3803{
3804 /* Zero the object first */
3805 memset(p, 0, sizeof(*p));
3806
3807 /* Set the table to all 1s */
3808 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3809
3810 /* Init a pool as full */
3811 atomic_set(&p->credit, credit);
3812
3813 /* The total pool size */
3814 p->pool_sz = credit;
3815
3816 p->base_pool_offset = base;
3817
3818 /* Commit the change */
3819 smp_mb();
3820
3821 p->check = bnx2x_credit_pool_check;
3822
3823 /* if pool credit is negative - disable the checks */
3824 if (credit >= 0) {
3825 p->put = bnx2x_credit_pool_put;
3826 p->get = bnx2x_credit_pool_get;
3827 p->put_entry = bnx2x_credit_pool_put_entry;
3828 p->get_entry = bnx2x_credit_pool_get_entry;
3829 } else {
3830 p->put = bnx2x_credit_pool_always_true;
3831 p->get = bnx2x_credit_pool_always_true;
3832 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3833 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3834 }
3835
3836 /* If base is negative - disable entries handling */
3837 if (base < 0) {
3838 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3839 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3840 }
3841}
3842
3843void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3844 struct bnx2x_credit_pool_obj *p, u8 func_id,
3845 u8 func_num)
3846{
3847/* TODO: this will be defined in consts as well... */
3848#define BNX2X_CAM_SIZE_EMUL 5
3849
3850 int cam_sz;
3851
3852 if (CHIP_IS_E1(bp)) {
3853 /* In E1, Multicast is saved in cam... */
3854 if (!CHIP_REV_IS_SLOW(bp))
3855 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3856 else
3857 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3858
3859 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3860
3861 } else if (CHIP_IS_E1H(bp)) {
3862 /* CAM credit is equally divided among all active functions
3863 * on the PORT.
3864 */
3865 if (func_num > 0) {
3866 if (!CHIP_REV_IS_SLOW(bp))
3867 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3868 else
3869 cam_sz = BNX2X_CAM_SIZE_EMUL;
3870 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3871 } else {
3872 /* this should never happen! Block MAC operations. */
3873 bnx2x_init_credit_pool(p, 0, 0);
3874 }
3875
3876 } else {
3877
3878 /*
3879 * CAM credit is equally divided among all active functions
3880 * on the PATH.
3881 */
3882 if (func_num > 0) {
3883 if (!CHIP_REV_IS_SLOW(bp))
3884 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3885 else
3886 cam_sz = BNX2X_CAM_SIZE_EMUL;
3887
3888 /*
3889 * No need for CAM entries handling for 57712 and
3890 * newer.
3891 */
3892 bnx2x_init_credit_pool(p, -1, cam_sz);
3893 } else {
3894 /* this should never happen! Block MAC operations. */
3895 bnx2x_init_credit_pool(p, 0, 0);
3896 }
3897
3898 }
3899}
3900
3901void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3902 struct bnx2x_credit_pool_obj *p,
3903 u8 func_id,
3904 u8 func_num)
3905{
3906 if (CHIP_IS_E1x(bp)) {
3907 /*
3908 * There is no VLAN credit in HW on 57710 and 57711; only
3909 * MAC / MAC-VLAN pairs can be set.
3910 */
3911 bnx2x_init_credit_pool(p, 0, -1);
3912 } else {
3913 /*
3914 * CAM credit is equally divided among all active functions
3915 * on the PATH.
3916 */
3917 if (func_num > 0) {
3918 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3919 bnx2x_init_credit_pool(p, func_id * credit, credit);
3920 } else
3921 /* this should never happen! Block VLAN operations. */
3922 bnx2x_init_credit_pool(p, 0, 0);
3923 }
3924}
3925
3926/****************** RSS Configuration ******************/
3927/**
3928 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3929 *
3930 * @bp: driver handle
3931 * @p: pointer to rss configuration
3932 *
3933 * Prints it when NETIF_MSG_IFUP debug level is configured.
3934 */
3935static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3936 struct bnx2x_config_rss_params *p)
3937{
3938 int i;
3939
3940 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3941 DP(BNX2X_MSG_SP, "0x0000: ");
3942 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3943 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3944
3945 /* Print 4 bytes in a line */
3946 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3947 (((i + 1) & 0x3) == 0)) {
3948 DP_CONT(BNX2X_MSG_SP, "\n");
3949 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
3950 }
3951 }
3952
3953 DP_CONT(BNX2X_MSG_SP, "\n");
3954}
3955
3956/**
3957 * bnx2x_setup_rss - configure RSS
3958 *
3959 * @bp: device handle
3960 * @p: rss configuration
3961 *
3962 * Sends an RSS_UPDATE ramrod.
3963 */
3964static int bnx2x_setup_rss(struct bnx2x *bp,
3965 struct bnx2x_config_rss_params *p)
3966{
3967 struct bnx2x_rss_config_obj *o = p->rss_obj;
3968 struct bnx2x_raw_obj *r = &o->raw;
3969 struct eth_rss_update_ramrod_data *data =
3970 (struct eth_rss_update_ramrod_data *)(r->rdata);
3971 u8 rss_mode = 0;
3972 int rc;
3973
3974 memset(data, 0, sizeof(*data));
3975
3976 DP(BNX2X_MSG_SP, "Configuring RSS\n");
3977
3978 /* Set an echo field */
3979 data->echo = (r->cid & BNX2X_SWCID_MASK) |
3980 (r->state << BNX2X_SWCID_SHIFT);
3981
3982 /* RSS mode */
3983 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
3984 rss_mode = ETH_RSS_MODE_DISABLED;
3985 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
3986 rss_mode = ETH_RSS_MODE_REGULAR;
3987 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
3988 rss_mode = ETH_RSS_MODE_VLAN_PRI;
3989 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
3990 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
3991 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
3992 rss_mode = ETH_RSS_MODE_IP_DSCP;
3993
3994 data->rss_mode = rss_mode;
3995
3996 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
3997
3998 /* RSS capabilities */
3999 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4000 data->capabilities |=
4001 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4002
4003 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4004 data->capabilities |=
4005 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4006
4007 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4008 data->capabilities |=
4009 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4010
4011 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4012 data->capabilities |=
4013 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4014
4015 /* Hashing mask */
4016 data->rss_result_mask = p->rss_result_mask;
4017
4018 /* RSS engine ID */
4019 data->rss_engine_id = o->engine_id;
4020
4021 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4022
4023 /* Indirection table */
4024 memcpy(data->indirection_table, p->ind_table,
4025 T_ETH_INDIRECTION_TABLE_SIZE);
4026
4027 /* Remember the last configuration */
4028 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4029
4030 /* Print the indirection table */
4031 if (netif_msg_ifup(bp))
4032 bnx2x_debug_print_ind_table(bp, p);
4033
4034 /* RSS keys */
4035 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4036 memcpy(&data->rss_key[0], &p->rss_key[0],
4037 sizeof(data->rss_key));
4038 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4039 }
4040
4041 /* Commit writes towards the memory before sending a ramrod */
4042 mb();
4043
4044 /* Send a ramrod */
4045 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4046 U64_HI(r->rdata_mapping),
4047 U64_LO(r->rdata_mapping),
4048 ETH_CONNECTION_TYPE);
4049
4050 if (rc < 0)
4051 return rc;
4052
4053 return 1;
4054}
4055
4056void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4057 u8 *ind_table)
4058{
4059 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4060}
4061
4062int bnx2x_config_rss(struct bnx2x *bp,
4063 struct bnx2x_config_rss_params *p)
4064{
4065 int rc;
4066 struct bnx2x_rss_config_obj *o = p->rss_obj;
4067 struct bnx2x_raw_obj *r = &o->raw;
4068
4069 /* Do nothing if only driver cleanup was requested */
4070 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4071 return 0;
4072
4073 r->set_pending(r);
4074
4075 rc = o->config_rss(bp, p);
4076 if (rc < 0) {
4077 r->clear_pending(r);
4078 return rc;
4079 }
4080
4081 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4082 rc = r->wait_comp(bp, r);
4083
4084 return rc;
4085}
4086
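/* A minimal caller sketch (assumed, not taken from this driver): request
 * regular RSS with IPv4/TCP hashing and wait for the ramrod to complete.
 * The rss_obj is assumed to have been initialized earlier with
 * bnx2x_init_rss_config_obj(); MULTI_MASK is assumed as the hashing mask.
 */
static int bnx2x_example_config_rss(struct bnx2x *bp,
				    struct bnx2x_rss_config_obj *rss_obj,
				    const u8 *ind_table)
{
	struct bnx2x_config_rss_params params;

	memset(&params, 0, sizeof(params));

	params.rss_obj = rss_obj;
	params.rss_result_mask = MULTI_MASK;

	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);

	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	return bnx2x_config_rss(bp, &params);
}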
4087
4088void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4089 struct bnx2x_rss_config_obj *rss_obj,
4090 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4091 void *rdata, dma_addr_t rdata_mapping,
4092 int state, unsigned long *pstate,
4093 bnx2x_obj_type type)
4094{
4095 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4096 rdata_mapping, state, pstate, type);
4097
4098 rss_obj->engine_id = engine_id;
4099 rss_obj->config_rss = bnx2x_setup_rss;
4100}
4101
4102/********************** Queue state object ***********************************/
4103
4104/**
4105 * bnx2x_queue_state_change - perform Queue state change transition
4106 *
4107 * @bp: device handle
4108 * @params: parameters to perform the transition
4109 *
4110 * returns 0 in case of successfully completed transition, negative error
4111 * code in case of failure, positive (EBUSY) value if there is a completion
4112 * that is still pending (possible only if RAMROD_COMP_WAIT is
4113 * not set in params->ramrod_flags for asynchronous commands).
4114 *
4115 */
4116int bnx2x_queue_state_change(struct bnx2x *bp,
4117 struct bnx2x_queue_state_params *params)
4118{
4119 struct bnx2x_queue_sp_obj *o = params->q_obj;
4120 int rc, pending_bit;
4121 unsigned long *pending = &o->pending;
4122
4123 /* Check that the requested transition is legal */
4124 if (o->check_transition(bp, o, params))
4125 return -EINVAL;
4126
4127 /* Set "pending" bit */
4128 pending_bit = o->set_pending(o, params);
4129
4130 /* Don't send a command if only driver cleanup was requested */
4131 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4132 o->complete_cmd(bp, o, pending_bit);
4133 else {
4134 /* Send a ramrod */
4135 rc = o->send_cmd(bp, params);
4136 if (rc) {
4137 o->next_state = BNX2X_Q_STATE_MAX;
4138 clear_bit(pending_bit, pending);
4139 smp_mb__after_clear_bit();
4140 return rc;
4141 }
4142
4143 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4144 rc = o->wait_comp(bp, o, pending_bit);
4145 if (rc)
4146 return rc;
4147
4148 return 0;
4149 }
4150 }
4151
4152 return !!test_bit(pending_bit, pending);
4153}
4154
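/* A minimal caller sketch (assumed, not taken from this driver):
 * deactivate a queue through the generic state machine and block until
 * the underlying UPDATE ramrod completes.
 */
static int bnx2x_example_deactivate_queue(struct bnx2x *bp,
					  struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params params;

	memset(&params, 0, sizeof(params));

	params.q_obj = q_obj;
	params.cmd = BNX2X_Q_CMD_DEACTIVATE;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &params);
}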
4155
4156static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4157 struct bnx2x_queue_state_params *params)
4158{
4159 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4160
4161 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4162 * UPDATE command.
4163 */
4164 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4165 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4166 bit = BNX2X_Q_CMD_UPDATE;
4167 else
4168 bit = cmd;
4169
4170 set_bit(bit, &obj->pending);
4171 return bit;
4172}
4173
4174static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4175 struct bnx2x_queue_sp_obj *o,
4176 enum bnx2x_queue_cmd cmd)
4177{
4178 return bnx2x_state_wait(bp, cmd, &o->pending);
4179}
4180
4181/**
4182 * bnx2x_queue_comp_cmd - complete the state change command.
4183 *
4184 * @bp: device handle
4185 * @o: queue state object
4186 * @cmd: command that has completed
4187 *
4188 * Checks that the arrived completion is expected.
4189 */
4190static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4191 struct bnx2x_queue_sp_obj *o,
4192 enum bnx2x_queue_cmd cmd)
4193{
4194 unsigned long cur_pending = o->pending;
4195
4196 if (!test_and_clear_bit(cmd, &cur_pending)) {
4197 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4198 "pending 0x%lx, next_state %d\n", cmd,
4199 o->cids[BNX2X_PRIMARY_CID_INDEX],
4200 o->state, cur_pending, o->next_state);
4201 return -EINVAL;
4202 }
4203
4204 if (o->next_tx_only >= o->max_cos)
4205 /* >= because tx only must always be smaller than cos since the
4206 * primary connection supports COS 0
4207 */
4208 BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4209 o->next_tx_only, o->max_cos);
4210
4211 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4212 "setting state to %d\n", cmd,
4213 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4214
4215 if (o->next_tx_only) /* print num tx-only if any exist */
4216 DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d",
4217 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4218
4219 o->state = o->next_state;
4220 o->num_tx_only = o->next_tx_only;
4221 o->next_state = BNX2X_Q_STATE_MAX;
4222
4223 /* It's important that o->state and o->next_state are
4224 * updated before o->pending.
4225 */
4226 wmb();
4227
4228 clear_bit(cmd, &o->pending);
4229 smp_mb__after_clear_bit();
4230
4231 return 0;
4232}
4233
4234static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4235 struct bnx2x_queue_state_params *cmd_params,
4236 struct client_init_ramrod_data *data)
4237{
4238 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4239
4240 /* Rx data */
4241
4242 /* IPv6 TPA supported for E2 and above only */
4243 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA, &params->flags) *
4244 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4245}
4246
4247static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4248 struct bnx2x_queue_sp_obj *o,
4249 struct bnx2x_general_setup_params *params,
4250 struct client_init_general_data *gen_data,
4251 unsigned long *flags)
4252{
4253 gen_data->client_id = o->cl_id;
4254
4255 if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4256 gen_data->statistics_counter_id =
4257 params->stat_id;
4258 gen_data->statistics_en_flg = 1;
4259 gen_data->statistics_zero_flg =
4260 test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4261 } else
4262 gen_data->statistics_counter_id =
4263 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4264
4265 gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4266 gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4267 gen_data->sp_client_id = params->spcl_id;
4268 gen_data->mtu = cpu_to_le16(params->mtu);
4269 gen_data->func_id = o->func_id;
4270
4271
4272 gen_data->cos = params->cos;
4273
4274 gen_data->traffic_type =
4275 test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4276 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4277
4278 DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d",
4279 gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4280}
4281
4282static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4283 struct bnx2x_txq_setup_params *params,
4284 struct client_init_tx_data *tx_data,
4285 unsigned long *flags)
4286{
4287 tx_data->enforce_security_flg =
4288 test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4289 tx_data->default_vlan =
4290 cpu_to_le16(params->default_vlan);
4291 tx_data->default_vlan_flg =
4292 test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4293 tx_data->tx_switching_flg =
4294 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4295 tx_data->anti_spoofing_flg =
4296 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4297 tx_data->tx_status_block_id = params->fw_sb_id;
4298 tx_data->tx_sb_index_number = params->sb_cq_index;
4299 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4300
4301 tx_data->tx_bd_page_base.lo =
4302 cpu_to_le32(U64_LO(params->dscr_map));
4303 tx_data->tx_bd_page_base.hi =
4304 cpu_to_le32(U64_HI(params->dscr_map));
4305
4306 /* Don't configure any Tx switching mode during queue SETUP */
4307 tx_data->state = 0;
4308}
4309
4310static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4311 struct rxq_pause_params *params,
4312 struct client_init_rx_data *rx_data)
4313{
4314 /* flow control data */
4315 rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4316 rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4317 rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4318 rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4319 rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4320 rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4321 rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4322}
4323
4324static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4325 struct bnx2x_rxq_setup_params *params,
4326 struct client_init_rx_data *rx_data,
4327 unsigned long *flags)
4328{
4329 /* Rx data */
4330 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4331 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4332 rx_data->vmqueue_mode_en_flg = 0;
4333
4334 rx_data->cache_line_alignment_log_size =
4335 params->cache_line_log;
4336 rx_data->enable_dynamic_hc =
4337 test_bit(BNX2X_Q_FLG_DHC, flags);
4338 rx_data->max_sges_for_packet = params->max_sges_pkt;
4339 rx_data->client_qzone_id = params->cl_qzone_id;
4340 rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4341
4342 /* Always start in DROP_ALL mode */
4343 rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4344 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4345
4346 /* We don't set drop flags */
4347 rx_data->drop_ip_cs_err_flg = 0;
4348 rx_data->drop_tcp_cs_err_flg = 0;
4349 rx_data->drop_ttl0_flg = 0;
4350 rx_data->drop_udp_cs_err_flg = 0;
4351 rx_data->inner_vlan_removal_enable_flg =
4352 test_bit(BNX2X_Q_FLG_VLAN, flags);
4353 rx_data->outer_vlan_removal_enable_flg =
4354 test_bit(BNX2X_Q_FLG_OV, flags);
4355 rx_data->status_block_id = params->fw_sb_id;
4356 rx_data->rx_sb_index_number = params->sb_cq_index;
4357 rx_data->max_tpa_queues = params->max_tpa_queues;
4358 rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4359 rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4360 rx_data->bd_page_base.lo =
4361 cpu_to_le32(U64_LO(params->dscr_map));
4362 rx_data->bd_page_base.hi =
4363 cpu_to_le32(U64_HI(params->dscr_map));
4364 rx_data->sge_page_base.lo =
4365 cpu_to_le32(U64_LO(params->sge_map));
4366 rx_data->sge_page_base.hi =
4367 cpu_to_le32(U64_HI(params->sge_map));
4368 rx_data->cqe_page_base.lo =
4369 cpu_to_le32(U64_LO(params->rcq_map));
4370 rx_data->cqe_page_base.hi =
4371 cpu_to_le32(U64_HI(params->rcq_map));
4372 rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4373
4374 if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4375 rx_data->approx_mcast_engine_id = o->func_id;
4376 rx_data->is_approx_mcast = 1;
4377 }
4378
4379 rx_data->rss_engine_id = params->rss_engine_id;
4380
4381 /* silent vlan removal */
4382 rx_data->silent_vlan_removal_flg =
4383 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4384 rx_data->silent_vlan_value =
4385 cpu_to_le16(params->silent_removal_value);
4386 rx_data->silent_vlan_mask =
4387 cpu_to_le16(params->silent_removal_mask);
4388
4389}
4390
4391/* initialize the general, tx and rx parts of a queue object */
4392static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4393 struct bnx2x_queue_state_params *cmd_params,
4394 struct client_init_ramrod_data *data)
4395{
4396 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4397 &cmd_params->params.setup.gen_params,
4398 &data->general,
4399 &cmd_params->params.setup.flags);
4400
4401 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4402 &cmd_params->params.setup.txq_params,
4403 &data->tx,
4404 &cmd_params->params.setup.flags);
4405
4406 bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4407 &cmd_params->params.setup.rxq_params,
4408 &data->rx,
4409 &cmd_params->params.setup.flags);
4410
4411 bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4412 &cmd_params->params.setup.pause_params,
4413 &data->rx);
4414}
4415
4416/* initialize the general and tx parts of a tx-only queue object */
4417static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4418 struct bnx2x_queue_state_params *cmd_params,
4419 struct tx_queue_init_ramrod_data *data)
4420{
4421 bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4422 &cmd_params->params.tx_only.gen_params,
4423 &data->general,
4424 &cmd_params->params.tx_only.flags);
4425
4426 bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4427 &cmd_params->params.tx_only.txq_params,
4428 &data->tx,
4429 &cmd_params->params.tx_only.flags);
4430
4431 DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n", cmd_params->q_obj->cids[0],
4432 data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4433}
4434
4435/**
4436 * bnx2x_q_init - init HW/FW queue
4437 *
4438 * @bp: device handle
4439 * @params: queue state parameters
4440 *
4441 * HW/FW initial Queue configuration:
4442 * - HC: Rx and Tx
4443 * - CDU context validation
4444 *
4445 */
4446static inline int bnx2x_q_init(struct bnx2x *bp,
4447 struct bnx2x_queue_state_params *params)
4448{
4449 struct bnx2x_queue_sp_obj *o = params->q_obj;
4450 struct bnx2x_queue_init_params *init = &params->params.init;
4451 u16 hc_usec;
4452 u8 cos;
4453
4454 /* Tx HC configuration */
4455 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4456 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4457 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4458
4459 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4460 init->tx.sb_cq_index,
4461 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4462 hc_usec);
4463 }
4464
4465 /* Rx HC configuration */
4466 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4467 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4468 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4469
4470 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4471 init->rx.sb_cq_index,
4472 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4473 hc_usec);
4474 }
4475
4476 /* Set CDU context validation values */
4477 for (cos = 0; cos < o->max_cos; cos++) {
4478 DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d",
4479 o->cids[cos], cos);
4480 DP(BNX2X_MSG_SP, "context pointer %p", init->cxts[cos]);
4481 bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4482 }
4483
4484 /* As no ramrod is sent, complete the command immediately */
4485 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4486
4487 mmiowb();
4488 smp_mb();
4489
4490 return 0;
4491}
4492
4493static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4494 struct bnx2x_queue_state_params *params)
4495{
4496 struct bnx2x_queue_sp_obj *o = params->q_obj;
4497 struct client_init_ramrod_data *rdata =
4498 (struct client_init_ramrod_data *)o->rdata;
4499 dma_addr_t data_mapping = o->rdata_mapping;
4500 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4501
4502 /* Clear the ramrod data */
4503 memset(rdata, 0, sizeof(*rdata));
4504
4505 /* Fill the ramrod data */
4506 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4507
4508 mb();
4509
4510 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4511 U64_HI(data_mapping),
4512 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4513}
4514
4515static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4516 struct bnx2x_queue_state_params *params)
4517{
4518 struct bnx2x_queue_sp_obj *o = params->q_obj;
4519 struct client_init_ramrod_data *rdata =
4520 (struct client_init_ramrod_data *)o->rdata;
4521 dma_addr_t data_mapping = o->rdata_mapping;
4522 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4523
4524 /* Clear the ramrod data */
4525 memset(rdata, 0, sizeof(*rdata));
4526
4527 /* Fill the ramrod data */
4528 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4529 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4530
4531
4532 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4533 U64_HI(data_mapping),
4534 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4535}
4536
4537static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4538 struct bnx2x_queue_state_params *params)
4539{
4540 struct bnx2x_queue_sp_obj *o = params->q_obj;
4541 struct tx_queue_init_ramrod_data *rdata =
4542 (struct tx_queue_init_ramrod_data *)o->rdata;
4543 dma_addr_t data_mapping = o->rdata_mapping;
4544 int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4545 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4546 &params->params.tx_only;
4547 u8 cid_index = tx_only_params->cid_index;
4548
4549
4550 if (cid_index >= o->max_cos) {
4551 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4552 o->cl_id, cid_index);
4553 return -EINVAL;
4554 }
4555
4556 DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d",
4557 tx_only_params->gen_params.cos,
4558 tx_only_params->gen_params.spcl_id);
4559
4560 /* Clear the ramrod data */
4561 memset(rdata, 0, sizeof(*rdata));
4562
4563 /* Fill the ramrod data */
4564 bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4565
4566 DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
4567 "sp-client id %d, cos %d\n",
4568 o->cids[cid_index],
4569 rdata->general.client_id,
4570 rdata->general.sp_client_id, rdata->general.cos);
4571
4572 /*
4573 * No need for an explicit memory barrier here: we need to ensure
4574 * the ordering of writing to the SPQ element and updating of the
4575 * SPQ producer, which involves a memory read, so a full memory
4576 * barrier is put there instead
4577 * (inside bnx2x_sp_post()).
4578 */
4579
4580 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4581 U64_HI(data_mapping),
4582 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4583}
4584
4585static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4586 struct bnx2x_queue_sp_obj *obj,
4587 struct bnx2x_queue_update_params *params,
4588 struct client_update_ramrod_data *data)
4589{
4590 /* Client ID of the client to update */
4591 data->client_id = obj->cl_id;
4592
4593 /* Function ID of the client to update */
4594 data->func_id = obj->func_id;
4595
4596 /* Default VLAN value */
4597 data->default_vlan = cpu_to_le16(params->def_vlan);
4598
4599 /* Inner VLAN stripping */
4600 data->inner_vlan_removal_enable_flg =
4601 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4602 data->inner_vlan_removal_change_flg =
4603 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4604 &params->update_flags);
4605
4606 /* Outer VLAN stripping */
4607 data->outer_vlan_removal_enable_flg =
4608 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4609 data->outer_vlan_removal_change_flg =
4610 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4611 &params->update_flags);
4612
4613 /* Drop packets that have source MAC that doesn't belong to this
4614 * Queue.
4615 */
4616 data->anti_spoofing_enable_flg =
4617 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4618 data->anti_spoofing_change_flg =
4619 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4620
4621 /* Activate/Deactivate */
4622 data->activate_flg =
4623 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4624 data->activate_change_flg =
4625 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4626
4627 /* Enable default VLAN */
4628 data->default_vlan_enable_flg =
4629 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4630 data->default_vlan_change_flg =
4631 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4632 &params->update_flags);
4633
4634 /* silent vlan removal */
4635 data->silent_vlan_change_flg =
4636 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4637 &params->update_flags);
4638 data->silent_vlan_removal_flg =
4639 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4640 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4641 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4642}
4643
4644static inline int bnx2x_q_send_update(struct bnx2x *bp,
4645 struct bnx2x_queue_state_params *params)
4646{
4647 struct bnx2x_queue_sp_obj *o = params->q_obj;
4648 struct client_update_ramrod_data *rdata =
4649 (struct client_update_ramrod_data *)o->rdata;
4650 dma_addr_t data_mapping = o->rdata_mapping;
4651 struct bnx2x_queue_update_params *update_params =
4652 &params->params.update;
4653 u8 cid_index = update_params->cid_index;
4654
4655 if (cid_index >= o->max_cos) {
4656 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4657 o->cl_id, cid_index);
4658 return -EINVAL;
4659 }
4660
4661
4662 /* Clear the ramrod data */
4663 memset(rdata, 0, sizeof(*rdata));
4664
4665 /* Fill the ramrod data */
4666 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4667
4668 mb();
4669
4670 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4671 o->cids[cid_index], U64_HI(data_mapping),
4672 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4673}
4674
4675/**
4676 * bnx2x_q_send_deactivate - send DEACTIVATE command
4677 *
4678 * @bp: device handle
4679 * @params: queue state parameters
4680 *
4681 * implemented using the UPDATE command.
4682 */
4683static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4684 struct bnx2x_queue_state_params *params)
4685{
4686 struct bnx2x_queue_update_params *update = &params->params.update;
4687
4688 memset(update, 0, sizeof(*update));
4689
4690 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4691
4692 return bnx2x_q_send_update(bp, params);
4693}
4694
4695/**
4696 * bnx2x_q_send_activate - send ACTIVATE command
4697 *
4698 * @bp: device handle
4699 * @params: queue state parameters
4700 *
4701 * implemented using the UPDATE command.
4702 */
4703static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4704 struct bnx2x_queue_state_params *params)
4705{
4706 struct bnx2x_queue_update_params *update = &params->params.update;
4707
4708 memset(update, 0, sizeof(*update));
4709
4710 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4711 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4712
4713 return bnx2x_q_send_update(bp, params);
4714}
4715
4716static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4717 struct bnx2x_queue_state_params *params)
4718{
4719 /* TODO: Not implemented yet. */
4720 return -1;
4721}
4722
4723static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4724 struct bnx2x_queue_state_params *params)
4725{
4726 struct bnx2x_queue_sp_obj *o = params->q_obj;
4727
4728 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4729 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4730 ETH_CONNECTION_TYPE);
4731}
4732
4733static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4734 struct bnx2x_queue_state_params *params)
4735{
4736 struct bnx2x_queue_sp_obj *o = params->q_obj;
4737 u8 cid_idx = params->params.cfc_del.cid_index;
4738
4739 if (cid_idx >= o->max_cos) {
4740 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4741 o->cl_id, cid_idx);
4742 return -EINVAL;
4743 }
4744
4745 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4746 o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4747}
4748
4749static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4750 struct bnx2x_queue_state_params *params)
4751{
4752 struct bnx2x_queue_sp_obj *o = params->q_obj;
4753 u8 cid_index = params->params.terminate.cid_index;
4754
4755 if (cid_index >= o->max_cos) {
4756 BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4757 o->cl_id, cid_index);
4758 return -EINVAL;
4759 }
4760
4761 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4762 o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4763}
4764
4765static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4766 struct bnx2x_queue_state_params *params)
4767{
4768 struct bnx2x_queue_sp_obj *o = params->q_obj;
4769
4770 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4771 o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4772 ETH_CONNECTION_TYPE);
4773}
4774
4775static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4776 struct bnx2x_queue_state_params *params)
4777{
4778 switch (params->cmd) {
4779 case BNX2X_Q_CMD_INIT:
4780 return bnx2x_q_init(bp, params);
4781 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4782 return bnx2x_q_send_setup_tx_only(bp, params);
4783 case BNX2X_Q_CMD_DEACTIVATE:
4784 return bnx2x_q_send_deactivate(bp, params);
4785 case BNX2X_Q_CMD_ACTIVATE:
4786 return bnx2x_q_send_activate(bp, params);
4787 case BNX2X_Q_CMD_UPDATE:
4788 return bnx2x_q_send_update(bp, params);
4789 case BNX2X_Q_CMD_UPDATE_TPA:
4790 return bnx2x_q_send_update_tpa(bp, params);
4791 case BNX2X_Q_CMD_HALT:
4792 return bnx2x_q_send_halt(bp, params);
4793 case BNX2X_Q_CMD_CFC_DEL:
4794 return bnx2x_q_send_cfc_del(bp, params);
4795 case BNX2X_Q_CMD_TERMINATE:
4796 return bnx2x_q_send_terminate(bp, params);
4797 case BNX2X_Q_CMD_EMPTY:
4798 return bnx2x_q_send_empty(bp, params);
4799 default:
4800 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4801 return -EINVAL;
4802 }
4803}
4804
4805static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4806 struct bnx2x_queue_state_params *params)
4807{
4808 switch (params->cmd) {
4809 case BNX2X_Q_CMD_SETUP:
4810 return bnx2x_q_send_setup_e1x(bp, params);
4811 case BNX2X_Q_CMD_INIT:
4812 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4813 case BNX2X_Q_CMD_DEACTIVATE:
4814 case BNX2X_Q_CMD_ACTIVATE:
4815 case BNX2X_Q_CMD_UPDATE:
4816 case BNX2X_Q_CMD_UPDATE_TPA:
4817 case BNX2X_Q_CMD_HALT:
4818 case BNX2X_Q_CMD_CFC_DEL:
4819 case BNX2X_Q_CMD_TERMINATE:
4820 case BNX2X_Q_CMD_EMPTY:
4821 return bnx2x_queue_send_cmd_cmn(bp, params);
4822 default:
4823 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4824 return -EINVAL;
4825 }
4826}
4827
4828static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4829 struct bnx2x_queue_state_params *params)
4830{
4831 switch (params->cmd) {
4832 case BNX2X_Q_CMD_SETUP:
4833 return bnx2x_q_send_setup_e2(bp, params);
4834 case BNX2X_Q_CMD_INIT:
4835 case BNX2X_Q_CMD_SETUP_TX_ONLY:
4836 case BNX2X_Q_CMD_DEACTIVATE:
4837 case BNX2X_Q_CMD_ACTIVATE:
4838 case BNX2X_Q_CMD_UPDATE:
4839 case BNX2X_Q_CMD_UPDATE_TPA:
4840 case BNX2X_Q_CMD_HALT:
4841 case BNX2X_Q_CMD_CFC_DEL:
4842 case BNX2X_Q_CMD_TERMINATE:
4843 case BNX2X_Q_CMD_EMPTY:
4844 return bnx2x_queue_send_cmd_cmn(bp, params);
4845 default:
4846 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4847 return -EINVAL;
4848 }
4849}
4850
4851/**
4852 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4853 *
4854 * @bp: device handle
4855 * @o: queue state object
4856 * @params: queue state parameters
4857 *
4858 * (not Forwarding)
4859 * It both checks if the requested command is legal in a current
4860 * state and, if it's legal, sets a `next_state' in the object
4861 * that will be used in the completion flow to set the `state'
4862 * of the object.
4863 *
4864 * returns 0 if a requested command is a legal transition,
4865 * -EINVAL otherwise.
4866 */
4867static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4868 struct bnx2x_queue_sp_obj *o,
4869 struct bnx2x_queue_state_params *params)
4870{
4871 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4872 enum bnx2x_queue_cmd cmd = params->cmd;
4873 struct bnx2x_queue_update_params *update_params =
4874 &params->params.update;
4875 u8 next_tx_only = o->num_tx_only;
4876
4877 switch (state) {
4878 case BNX2X_Q_STATE_RESET:
4879 if (cmd == BNX2X_Q_CMD_INIT)
4880 next_state = BNX2X_Q_STATE_INITIALIZED;
4881
4882 break;
4883 case BNX2X_Q_STATE_INITIALIZED:
4884 if (cmd == BNX2X_Q_CMD_SETUP) {
4885 if (test_bit(BNX2X_Q_FLG_ACTIVE,
4886 &params->params.setup.flags))
4887 next_state = BNX2X_Q_STATE_ACTIVE;
4888 else
4889 next_state = BNX2X_Q_STATE_INACTIVE;
4890 }
4891
4892 break;
4893 case BNX2X_Q_STATE_ACTIVE:
4894 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4895 next_state = BNX2X_Q_STATE_INACTIVE;
4896
4897 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4898 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4899 next_state = BNX2X_Q_STATE_ACTIVE;
4900
4901 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4902 next_state = BNX2X_Q_STATE_MULTI_COS;
4903 next_tx_only = 1;
4904 }
4905
4906 else if (cmd == BNX2X_Q_CMD_HALT)
4907 next_state = BNX2X_Q_STATE_STOPPED;
4908
4909 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4910 /* If "active" state change is requested, update the
4911 * state accordingly.
4912 */
4913 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4914 &update_params->update_flags) &&
4915 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4916 &update_params->update_flags))
4917 next_state = BNX2X_Q_STATE_INACTIVE;
4918 else
4919 next_state = BNX2X_Q_STATE_ACTIVE;
4920 }
4921
4922 break;
4923 case BNX2X_Q_STATE_MULTI_COS:
4924 if (cmd == BNX2X_Q_CMD_TERMINATE)
4925 next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
4926
4927 else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4928 next_state = BNX2X_Q_STATE_MULTI_COS;
4929 next_tx_only = o->num_tx_only + 1;
4930 }
4931
4932 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4933 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4934 next_state = BNX2X_Q_STATE_MULTI_COS;
4935
4936 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4937 /* If "active" state change is requested, update the
4938 * state accordingly.
4939 */
4940 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4941 &update_params->update_flags) &&
4942 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4943 &update_params->update_flags))
4944 next_state = BNX2X_Q_STATE_INACTIVE;
4945 else
4946 next_state = BNX2X_Q_STATE_MULTI_COS;
4947 }
4948
4949 break;
4950 case BNX2X_Q_STATE_MCOS_TERMINATED:
4951 if (cmd == BNX2X_Q_CMD_CFC_DEL) {
4952 next_tx_only = o->num_tx_only - 1;
4953 if (next_tx_only == 0)
4954 next_state = BNX2X_Q_STATE_ACTIVE;
4955 else
4956 next_state = BNX2X_Q_STATE_MULTI_COS;
4957 }
4958
4959 break;
4960 case BNX2X_Q_STATE_INACTIVE:
4961 if (cmd == BNX2X_Q_CMD_ACTIVATE)
4962 next_state = BNX2X_Q_STATE_ACTIVE;
4963
4964 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4965 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4966 next_state = BNX2X_Q_STATE_INACTIVE;
4967
4968 else if (cmd == BNX2X_Q_CMD_HALT)
4969 next_state = BNX2X_Q_STATE_STOPPED;
4970
4971 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4972 /* If "active" state change is requested, update the
4973 * state accordingly.
4974 */
4975 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4976 &update_params->update_flags) &&
4977 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4978 &update_params->update_flags)){
4979 if (o->num_tx_only == 0)
4980 next_state = BNX2X_Q_STATE_ACTIVE;
4981 else /* tx only queues exist for this queue */
4982 next_state = BNX2X_Q_STATE_MULTI_COS;
4983 } else
4984 next_state = BNX2X_Q_STATE_INACTIVE;
4985 }
4986
4987 break;
4988 case BNX2X_Q_STATE_STOPPED:
4989 if (cmd == BNX2X_Q_CMD_TERMINATE)
4990 next_state = BNX2X_Q_STATE_TERMINATED;
4991
4992 break;
4993 case BNX2X_Q_STATE_TERMINATED:
4994 if (cmd == BNX2X_Q_CMD_CFC_DEL)
4995 next_state = BNX2X_Q_STATE_RESET;
4996
4997 break;
4998 default:
4999 BNX2X_ERR("Illegal state: %d\n", state);
5000 }
5001
5002 /* Transition is assured */
5003 if (next_state != BNX2X_Q_STATE_MAX) {
5004 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5005 state, cmd, next_state);
5006 o->next_state = next_state;
5007 o->next_tx_only = next_tx_only;
5008 return 0;
5009 }
5010
5011 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5012
5013 return -EINVAL;
5014}
5015
5016void bnx2x_init_queue_obj(struct bnx2x *bp,
5017 struct bnx2x_queue_sp_obj *obj,
5018 u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5019 void *rdata,
5020 dma_addr_t rdata_mapping, unsigned long type)
5021{
5022 memset(obj, 0, sizeof(*obj));
5023
5024 /* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5025 BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5026
5027 memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5028 obj->max_cos = cid_cnt;
5029 obj->cl_id = cl_id;
5030 obj->func_id = func_id;
5031 obj->rdata = rdata;
5032 obj->rdata_mapping = rdata_mapping;
5033 obj->type = type;
5034 obj->next_state = BNX2X_Q_STATE_MAX;
5035
5036 if (CHIP_IS_E1x(bp))
5037 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5038 else
5039 obj->send_cmd = bnx2x_queue_send_cmd_e2;
5040
5041 obj->check_transition = bnx2x_queue_chk_transition;
5042
5043 obj->complete_cmd = bnx2x_queue_comp_cmd;
5044 obj->wait_comp = bnx2x_queue_wait_comp;
5045 obj->set_pending = bnx2x_queue_set_pending;
5046}
5047
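/* A minimal init sketch (assumed, not taken from this driver): set up a
 * queue object carrying one Rx/Tx connection (a single CoS). The cid,
 * cl_id, func_id and the DMA-able rdata buffer are placeholders the
 * caller is assumed to own.
 */
static void bnx2x_example_init_queue_obj(struct bnx2x *bp,
					 struct bnx2x_queue_sp_obj *q_obj,
					 u32 cid, u8 cl_id, u8 func_id,
					 void *rdata,
					 dma_addr_t rdata_mapping)
{
	unsigned long q_type = 0;

	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	bnx2x_init_queue_obj(bp, q_obj, cl_id, &cid, 1 /* one CoS */,
			     func_id, rdata, rdata_mapping, q_type);
}
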
5048void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5049 struct bnx2x_queue_sp_obj *obj,
5050 u32 cid, u8 index)
5051{
5052 obj->cids[index] = cid;
5053}
5054
5055/********************** Function state object *********************************/
5056
5057static int bnx2x_func_wait_comp(struct bnx2x *bp,
5058 struct bnx2x_func_sp_obj *o,
5059 enum bnx2x_func_cmd cmd)
5060{
5061 return bnx2x_state_wait(bp, cmd, &o->pending);
5062}
5063
5064/**
5065 * bnx2x_func_state_change_comp - complete the state machine transition
5066 *
5067 * @bp: device handle
5068 * @o: function state object
5069 * @cmd: command that has completed
5070 *
5071 * Called on state change transition. Completes the state
5072 * machine transition only - no HW interaction.
5073 */
5074static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5075 struct bnx2x_func_sp_obj *o,
5076 enum bnx2x_func_cmd cmd)
5077{
5078 unsigned long cur_pending = o->pending;
5079
5080 if (!test_and_clear_bit(cmd, &cur_pending)) {
5081 BNX2X_ERR("Bad MC reply %d for func %d in state %d "
5082 "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
5083 o->state, cur_pending, o->next_state);
5084 return -EINVAL;
5085 }
5086
5087 DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
5088 "%d\n", cmd, BP_FUNC(bp), o->next_state);
5089
5090 o->state = o->next_state;
5091 o->next_state = BNX2X_F_STATE_MAX;
5092
5093 /* It's important that o->state and o->next_state are
5094 * updated before o->pending.
5095 */
5096 wmb();
5097
5098 clear_bit(cmd, &o->pending);
5099 smp_mb__after_clear_bit();
5100
5101 return 0;
5102}
5103
5104/**
5105 * bnx2x_func_comp_cmd - complete the state change command
5106 *
5107 * @bp: device handle
5108 * @o: function state object
5109 * @cmd: command that has completed
5110 *
5111 * Checks that the arrived completion is expected.
5112 */
5113static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5114 struct bnx2x_func_sp_obj *o,
5115 enum bnx2x_func_cmd cmd)
5116{
5117 /* Complete the state machine part first, check if it's a
5118 * legal completion.
5119 */
5120 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5121 return rc;
5122}
5123
5124/**
5125 * bnx2x_func_chk_transition - check function state machine transition
5126 *
5127 * @bp: device handle
5128 * @o: function state object
5129 * @params: function state parameters
5130 *
5131 * It both checks if the requested command is legal in a current
5132 * state and, if it's legal, sets a `next_state' in the object
5133 * that will be used in the completion flow to set the `state'
5134 * of the object.
5135 *
5136 * returns 0 if a requested command is a legal transition,
5137 * -EINVAL otherwise.
5138 */
5139static int bnx2x_func_chk_transition(struct bnx2x *bp,
5140 struct bnx2x_func_sp_obj *o,
5141 struct bnx2x_func_state_params *params)
5142{
5143 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5144 enum bnx2x_func_cmd cmd = params->cmd;
5145
5146 switch (state) {
5147 case BNX2X_F_STATE_RESET:
5148 if (cmd == BNX2X_F_CMD_HW_INIT)
5149 next_state = BNX2X_F_STATE_INITIALIZED;
5150
5151 break;
5152 case BNX2X_F_STATE_INITIALIZED:
5153 if (cmd == BNX2X_F_CMD_START)
5154 next_state = BNX2X_F_STATE_STARTED;
5155
5156 else if (cmd == BNX2X_F_CMD_HW_RESET)
5157 next_state = BNX2X_F_STATE_RESET;
5158
5159 break;
5160 case BNX2X_F_STATE_STARTED:
5161 if (cmd == BNX2X_F_CMD_STOP)
5162 next_state = BNX2X_F_STATE_INITIALIZED;
5163
5164 break;
5165 default:
5166 BNX2X_ERR("Unknown state: %d\n", state);
5167 }
5168
5169 /* Transition is assured */
5170 if (next_state != BNX2X_F_STATE_MAX) {
5171 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5172 state, cmd, next_state);
5173 o->next_state = next_state;
5174 return 0;
5175 }
5176
5177 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5178 state, cmd);
5179
5180 return -EINVAL;
5181}
5182
5183/**
5184 * bnx2x_func_init_func - performs HW init at function stage
5185 *
5186 * @bp: device handle
5187 * @drv: driver-specific operations
5188 *
5189 * Init HW when the current phase is
5190 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
5191 * HW blocks.
5192 */
5193static inline int bnx2x_func_init_func(struct bnx2x *bp,
5194 const struct bnx2x_func_sp_drv_ops *drv)
5195{
5196 return drv->init_hw_func(bp);
5197}
5198
5199/**
5200 * bnx2x_func_init_port - performs HW init at port stage
5201 *
5202 * @bp: device handle
5203 * @drv: driver-specific operations
5204 *
5205 * Init HW when the current phase is
5206 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5207 * FUNCTION-only HW blocks.
5208 *
5209 */
5210static inline int bnx2x_func_init_port(struct bnx2x *bp,
5211 const struct bnx2x_func_sp_drv_ops *drv)
5212{
5213 int rc = drv->init_hw_port(bp);
5214 if (rc)
5215 return rc;
5216
5217 return bnx2x_func_init_func(bp, drv);
5218}
5219
5220/**
5221 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5222 *
5223 * @bp: device handle
5224 * @drv: driver-specific operations
5225 *
5226 * Init HW when the current phase is
5227 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5228 * PORT-only and FUNCTION-only HW blocks.
5229 */
5230static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5231 const struct bnx2x_func_sp_drv_ops *drv)
5232{
5233 int rc = drv->init_hw_cmn_chip(bp);
5234 if (rc)
5235 return rc;
5236
5237 return bnx2x_func_init_port(bp, drv);
5238}
5239
5240/**
5241 * bnx2x_func_init_cmn - performs HW init at common stage
5242 *
5243 * @bp: device handle
5244 * @drv: driver-specific operations
5245 *
5246 * Init HW when the current phase is
5247 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5248 * PORT-only and FUNCTION-only HW blocks.
5249 */
5250static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5251 const struct bnx2x_func_sp_drv_ops *drv)
5252{
5253 int rc = drv->init_hw_cmn(bp);
5254 if (rc)
5255 return rc;
5256
5257 return bnx2x_func_init_port(bp, drv);
5258}
5259
5260static int bnx2x_func_hw_init(struct bnx2x *bp,
5261 struct bnx2x_func_state_params *params)
5262{
5263 u32 load_code = params->params.hw_init.load_phase;
5264 struct bnx2x_func_sp_obj *o = params->f_obj;
5265 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5266 int rc = 0;
5267
5268 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5269 BP_ABS_FUNC(bp), load_code);
5270
5271 /* Prepare buffers for unzipping the FW */
5272 rc = drv->gunzip_init(bp);
5273 if (rc)
5274 return rc;
5275
5276 /* Prepare FW */
5277 rc = drv->init_fw(bp);
5278 if (rc) {
5279 BNX2X_ERR("Error loading firmware\n");
5280 goto fw_init_err;
5281 }
5282
5283 /* Handle the beginning of COMMON_XXX phases separately... */
5284 switch (load_code) {
5285 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5286 rc = bnx2x_func_init_cmn_chip(bp, drv);
5287 if (rc)
5288 goto init_hw_err;
5289
5290 break;
5291 case FW_MSG_CODE_DRV_LOAD_COMMON:
5292 rc = bnx2x_func_init_cmn(bp, drv);
5293 if (rc)
5294 goto init_hw_err;
5295
5296 break;
5297 case FW_MSG_CODE_DRV_LOAD_PORT:
5298 rc = bnx2x_func_init_port(bp, drv);
5299 if (rc)
5300 goto init_hw_err;
5301
5302 break;
5303 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5304 rc = bnx2x_func_init_func(bp, drv);
5305 if (rc)
5306 goto init_hw_err;
5307
5308 break;
5309 default:
5310 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5311 rc = -EINVAL;
5312 }
5313
5314init_hw_err:
5315 drv->release_fw(bp);
5316
5317fw_init_err:
5318 drv->gunzip_end(bp);
5319
5320 /* In case of success, complete the command immediately: no ramrods
5321 * have been sent.
5322 */
5323 if (!rc)
5324 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5325
5326 return rc;
5327}
5328
5329/**
5330 * bnx2x_func_reset_func - reset HW at function stage
5331 *
5332 * @bp: device handle
5333 * @drv: driver-specific operations
5334 *
5335 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5336 * FUNCTION-only HW blocks.
5337 */
5338static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5339 const struct bnx2x_func_sp_drv_ops *drv)
5340{
5341 drv->reset_hw_func(bp);
5342}
5343
5344/**
5345 * bnx2x_func_reset_port - reset HW at port stage
5346 *
5347 * @bp: device handle
5348 * @drv: driver-specific operations
5349 *
5350 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5351 * FUNCTION-only and PORT-only HW blocks.
5352 *
5353 * !!!IMPORTANT!!!
5354 *
5355 * It's important to call reset_port before reset_func() as the last thing
5356 * reset_func does is pf_disable(), thus disabling PGLUE_B, which
5357 * makes any further DMAE transactions impossible.
5358 */
5359static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5360 const struct bnx2x_func_sp_drv_ops *drv)
5361{
5362 drv->reset_hw_port(bp);
5363 bnx2x_func_reset_func(bp, drv);
5364}
5365
5366/**
5367 * bnx2x_func_reset_cmn - reset HW at common stage
5368 *
5369 * @bp: device handle
5370 * @drv: driver-specific operations
5371 *
5372 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5373 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5374 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5375 */
5376static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5377 const struct bnx2x_func_sp_drv_ops *drv)
5378{
5379 bnx2x_func_reset_port(bp, drv);
5380 drv->reset_hw_cmn(bp);
5381}
5382
5383
5384static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5385 struct bnx2x_func_state_params *params)
5386{
5387 u32 reset_phase = params->params.hw_reset.reset_phase;
5388 struct bnx2x_func_sp_obj *o = params->f_obj;
5389 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5390
5391 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5392 reset_phase);
5393
5394 switch (reset_phase) {
5395 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5396 bnx2x_func_reset_cmn(bp, drv);
5397 break;
5398 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5399 bnx2x_func_reset_port(bp, drv);
5400 break;
5401 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5402 bnx2x_func_reset_func(bp, drv);
5403 break;
5404 default:
5405 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5406 reset_phase);
5407 break;
5408 }
5409
5410 /* Complete the command immediately: no ramrods have been sent. */
5411 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5412
5413 return 0;
5414}
5415
5416static inline int bnx2x_func_send_start(struct bnx2x *bp,
5417 struct bnx2x_func_state_params *params)
5418{
5419 struct bnx2x_func_sp_obj *o = params->f_obj;
5420 struct function_start_data *rdata =
5421 (struct function_start_data *)o->rdata;
5422 dma_addr_t data_mapping = o->rdata_mapping;
5423 struct bnx2x_func_start_params *start_params = &params->params.start;
5424
5425 memset(rdata, 0, sizeof(*rdata));
5426
5427 /* Fill the ramrod data with provided parameters */
5428 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5429 rdata->sd_vlan_tag = start_params->sd_vlan_tag;
5430 rdata->path_id = BP_PATH(bp);
5431 rdata->network_cos_mode = start_params->network_cos_mode;
5432
5433 mb();
5434
5435 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5436 U64_HI(data_mapping),
5437 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5438}
5439
5440static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5441 struct bnx2x_func_state_params *params)
5442{
5443 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5444 NONE_CONNECTION_TYPE);
5445}
5446
5447static int bnx2x_func_send_cmd(struct bnx2x *bp,
5448 struct bnx2x_func_state_params *params)
5449{
5450 switch (params->cmd) {
5451 case BNX2X_F_CMD_HW_INIT:
5452 return bnx2x_func_hw_init(bp, params);
5453 case BNX2X_F_CMD_START:
5454 return bnx2x_func_send_start(bp, params);
5455 case BNX2X_F_CMD_STOP:
5456 return bnx2x_func_send_stop(bp, params);
5457 case BNX2X_F_CMD_HW_RESET:
5458 return bnx2x_func_hw_reset(bp, params);
5459 default:
5460 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5461 return -EINVAL;
5462 }
5463}
5464
5465void bnx2x_init_func_obj(struct bnx2x *bp,
5466 struct bnx2x_func_sp_obj *obj,
5467 void *rdata, dma_addr_t rdata_mapping,
5468 struct bnx2x_func_sp_drv_ops *drv_iface)
5469{
5470 memset(obj, 0, sizeof(*obj));
5471
5472 mutex_init(&obj->one_pending_mutex);
5473
5474 obj->rdata = rdata;
5475 obj->rdata_mapping = rdata_mapping;
5476
5477 obj->send_cmd = bnx2x_func_send_cmd;
5478 obj->check_transition = bnx2x_func_chk_transition;
5479 obj->complete_cmd = bnx2x_func_comp_cmd;
5480 obj->wait_comp = bnx2x_func_wait_comp;
5481
5482 obj->drv = drv_iface;
5483}
5484
5485/**
5486 * bnx2x_func_state_change - perform Function state change transition
5487 *
5488 * @bp: device handle
5489 * @params: parameters to perform the transition
5490 *
5491 * returns 0 in case of successfully completed transition,
5492 * negative error code in case of failure, positive
5493 * (EBUSY) value if there is a completion that is
5494 * still pending (possible only if RAMROD_COMP_WAIT is
5495 * not set in params->ramrod_flags for asynchronous
5496 * commands).
5497 */
5498int bnx2x_func_state_change(struct bnx2x *bp,
5499 struct bnx2x_func_state_params *params)
5500{
5501 struct bnx2x_func_sp_obj *o = params->f_obj;
5502 int rc;
5503 enum bnx2x_func_cmd cmd = params->cmd;
5504 unsigned long *pending = &o->pending;
5505
5506 mutex_lock(&o->one_pending_mutex);
5507
5508 /* Check that the requested transition is legal */
5509 if (o->check_transition(bp, o, params)) {
5510 mutex_unlock(&o->one_pending_mutex);
5511 return -EINVAL;
5512 }
5513
5514 /* Set "pending" bit */
5515 set_bit(cmd, pending);
5516
5517 /* Don't send a command if only driver cleanup was requested */
5518 if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5519 bnx2x_func_state_change_comp(bp, o, cmd);
5520 mutex_unlock(&o->one_pending_mutex);
5521 } else {
5522 /* Send a ramrod */
5523 rc = o->send_cmd(bp, params);
5524
5525 mutex_unlock(&o->one_pending_mutex);
5526
5527 if (rc) {
5528 o->next_state = BNX2X_F_STATE_MAX;
5529 clear_bit(cmd, pending);
5530 smp_mb__after_clear_bit();
5531 return rc;
5532 }
5533
5534 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5535 rc = o->wait_comp(bp, o, cmd);
5536 if (rc)
5537 return rc;
5538
5539 return 0;
5540 }
5541 }
5542
5543 return !!test_bit(cmd, pending);
5544}
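
/* A minimal caller sketch (assumed, not taken from this driver): stop the
 * function through the state machine and wait for the FUNCTION_STOP
 * ramrod completion.
 */
static int bnx2x_example_func_stop(struct bnx2x *bp,
				   struct bnx2x_func_sp_obj *f_obj)
{
	struct bnx2x_func_state_params params;

	memset(&params, 0, sizeof(params));

	params.f_obj = f_obj;
	params.cmd = BNX2X_F_CMD_STOP;
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	return bnx2x_func_state_change(bp, &params);
}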