be2net: add FW cmds needed for VxLAN offloads
drivers/net/ethernet/emulex/benet/be.h

/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>

#include "be_hw.h"
#include "be_roce.h"

#define DRV_VER			"10.2u"
#define DRV_NAME		"be2net"
#define BE_NAME			"Emulex BladeEngine2"
#define BE3_NAME		"Emulex BladeEngine3"
#define OC_NAME			"Emulex OneConnect"
#define OC_NAME_BE		OC_NAME	"(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define OC_NAME_SH		OC_NAME "(Skyhawk)"
#define DRV_DESC		"Emulex OneConnect NIC Driver"

#define BE_VENDOR_ID		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
#define OC_DEVICE_ID4		0xe228	/* Device id for VF in Lancer */
#define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */
#define OC_DEVICE_ID6		0x728	/* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1	0xE602
#define OC_SUBSYS_DEVICE_ID2	0xE642
#define OC_SUBSYS_DEVICE_ID3	0xE612
#define OC_SUBSYS_DEVICE_ID4	0xE652

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE	(BE_HDR_LEN + 64)

#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		128u
#define BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define BE2_MAX_RSS_QS		4
#define BE3_MAX_RSS_QS		16
#define BE3_MAX_TX_QS		16
#define BE3_MAX_EVT_QS		16
#define BE3_SRIOV_MAX_EVT_QS	8

#define MAX_RX_QS		32
#define MAX_EVT_QS		32
#define MAX_TX_QS		32

#define MAX_ROCE_EQS		5
#define MAX_MSIX_VECTORS	32
#define MIN_MSIX_VECTORS	1
#define BE_TX_BUDGET		256
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT	/* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define MAX_VFS			30	/* Max VFs supported by BE3 FW */
#define FW_VER_LEN		32

struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
	return q->dma_mem.va + index * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void index_dec(u16 *index, u16 limit)
{
	*index = MODULO((*index - 1), limit);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}

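/* The queue lengths above are all powers of two, which is what lets MODULO()
 * reduce an index with a bitwise AND instead of a divide; the
 * BUG_ON(limit & (limit - 1)) check enforces this. For example, with
 * RX_Q_LEN = 1024:
 *
 *	u16 idx = 1023;
 *	index_inc(&idx, RX_Q_LEN);	// idx is now 0 (wrap-around)
 *	index_adv(&idx, 5, RX_Q_LEN);	// idx is now 5
 *	index_dec(&idx, RX_Q_LEN);	// idx is now 4
 */
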
struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 eqd;		/* configured val when aic is off */
	u32 cur_eqd;		/* in usecs */

	u8 idx;			/* array index */
	u8 msix_idx;
	u16 tx_budget;
	u16 spurious_intr;
	struct napi_struct napi;
	struct be_adapter *adapter;

#ifdef CONFIG_NET_RX_BUSY_POLL
#define BE_EQ_IDLE		0
#define BE_EQ_NAPI		1	/* napi owns this EQ */
#define BE_EQ_POLL		2	/* poll owns this EQ */
#define BE_EQ_LOCKED		(BE_EQ_NAPI | BE_EQ_POLL)
#define BE_EQ_NAPI_YIELD	4	/* napi yielded this EQ */
#define BE_EQ_POLL_YIELD	8	/* poll yielded this EQ */
#define BE_EQ_YIELD		(BE_EQ_NAPI_YIELD | BE_EQ_POLL_YIELD)
#define BE_EQ_USER_PEND		(BE_EQ_POLL | BE_EQ_POLL_YIELD)
	unsigned int state;
	spinlock_t lock;	/* lock to serialize napi and busy-poll */
#endif /* CONFIG_NET_RX_BUSY_POLL */
} ____cacheline_aligned_in_smp;

struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */
	bool enable;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 prev_eqd;		/* in usecs */
	u32 et_eqd;		/* configured val when aic is off */
	ulong jiffies;
	u64 rx_pkts_prev;	/* Used to calculate RX pps */
	u64 tx_reqs_prev;	/* Used to calculate TX pps */
};

enum {
	NAPI_POLLING,
	BUSY_POLLING
};

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	bool rearm_cq;
};

struct be_tx_stats {
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_reqs;
	u64 tx_wrbs;
	u64 tx_compl;
	ulong tx_jiffies;
	u32 tx_stops;
	u32 tx_drv_drops;	/* pkts dropped by driver */
	struct u64_stats_sync sync;
	struct u64_stats_sync sync_compl;
};

struct be_tx_obj {
	u32 db_offset;
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
} ____cacheline_aligned_in_smp;

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	/* set to page-addr for last frag of the page & frag-addr otherwise */
	DEFINE_DMA_UNMAP_ADDR(bus);
	u16 page_offset;
	bool last_frag;		/* last frag of the page */
};

struct be_rx_stats {
	u64 rx_bytes;
	u64 rx_pkts;
	u32 rx_drops_no_skbs;	/* skb allocation errors */
	u32 rx_drops_no_frags;	/* HW has no fetched frags */
	u32 rx_post_fail;	/* page post alloc failures */
	u32 rx_compl;
	u32 rx_mcast_pkts;
	u32 rx_compl_err;	/* completions with err set */
	struct u64_stats_sync sync;
};

struct be_rx_compl_info {
	u32 rss_hash;
	u16 vlan_tag;
	u16 pkt_size;
	u16 port;
	u8 vlanf;
	u8 num_rcvd;
	u8 err;
	u8 ipf;
	u8 tcpf;
	u8 udpf;
	u8 ip_csum;
	u8 l4_csum;
	u8 ipv6;
	u8 qnq;
	u8 pkt_type;
	u8 ip_frag;
};

struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_compl_info rxcp;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
} ____cacheline_aligned_in_smp;

struct be_drv_stats {
	u32 be_on_die_temperature;
	u32 eth_red_drops;
	u32 rx_drops_no_pbuf;
	u32 rx_drops_no_txpb;
	u32 rx_drops_no_erx_descr;
	u32 rx_drops_no_tpre_descr;
	u32 rx_drops_too_many_frags;
	u32 forwarded_packets;
	u32 rx_drops_mtu;
	u32 rx_crc_errors;
	u32 rx_alignment_symbol_errors;
	u32 rx_pause_frames;
	u32 rx_priority_pause_frames;
	u32 rx_control_frames;
	u32 rx_in_range_errors;
	u32 rx_out_range_errors;
	u32 rx_frame_too_long;
	u32 rx_address_filtered;
	u32 rx_dropped_too_small;
	u32 rx_dropped_too_short;
	u32 rx_dropped_header_too_small;
	u32 rx_dropped_tcp_length;
	u32 rx_dropped_runt;
	u32 rx_ip_checksum_errs;
	u32 rx_tcp_checksum_errs;
	u32 rx_udp_checksum_errs;
	u32 tx_pauseframes;
	u32 tx_priority_pauseframes;
	u32 tx_controlframes;
	u32 rxpp_fifo_overflow_drop;
	u32 rx_input_fifo_overflow_drop;
	u32 pmem_fifo_overflow_drop;
	u32 jabber_events;
	u32 rx_roce_bytes_lsd;
	u32 rx_roce_bytes_msd;
	u32 rx_roce_frames;
	u32 roce_drops_payload_len;
	u32 roce_drops_crc;
};

/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
#define BE_RESET_VLAN_TAG_ID	0xFFFF

struct be_vf_cfg {
	unsigned char mac_addr[ETH_ALEN];
	int if_handle;
	int pmac_id;
	u16 vlan_tag;
	u32 tx_rate;
	u32 plink_tracking;
};

enum vf_state {
	ENABLED = 0,
	ASSIGNED = 1
};

#define BE_FLAGS_LINK_STATUS_INIT	1
#define BE_FLAGS_WORKER_SCHEDULED	(1 << 3)
#define BE_FLAGS_VLAN_PROMISC		(1 << 4)
#define BE_FLAGS_NAPI_ENABLED		(1 << 9)
#define BE_UC_PMAC_COUNT		30
#define BE_VF_UC_PMAC_COUNT		2
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD	(1 << 11)

/* Ethtool set_dump flags */
#define LANCER_INITIATE_FW_DUMP		0x1

struct phy_info {
	u8 transceiver;
	u8 autoneg;
	u8 fc_autoneg;
	u8 port_type;
	u16 phy_type;
	u16 interface_type;
	u32 misc_params;
	u16 auto_speeds_supported;
	u16 fixed_speeds_supported;
	int link_speed;
	u32 dac_cable_len;
	u32 advertising;
	u32 supported;
};

struct be_resources {
	u16 max_vfs;		/* Total VFs "really" supported by FW/HW */
	u16 max_mcast_mac;
	u16 max_tx_qs;
	u16 max_rss_qs;
	u16 max_rx_qs;
	u16 max_uc_mac;		/* Max UC MACs programmable */
	u16 max_vlans;		/* Number of vlans supported */
	u16 max_evt_qs;
	u32 if_cap_flags;
};

struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *csr;	/* CSR BAR used only for BE2/3 */
	u8 __iomem *db;		/* Door Bell */

	struct mutex mbox_lock;	/* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored for freeing purpose */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	u16 cfg_num_qs;		/* configured via set-channels */
	u16 num_evt_qs;
	u16 num_msix_vec;
	struct be_eq_obj eq_obj[MAX_EVT_QS];
	struct msix_entry msix_entries[MAX_MSIX_VECTORS];
	bool isr_registered;

	/* TX Rings */
	u16 num_tx_qs;
	struct be_tx_obj tx_obj[MAX_TX_QS];

	/* Rx rings */
	u16 num_rx_qs;
	struct be_rx_obj rx_obj[MAX_RX_QS];
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

	struct be_drv_stats drv_stats;
	struct be_aic_obj aic_obj[MAX_EVT_QS];
	u16 vlans_added;
	u8 vlan_tag[VLAN_N_VID];
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
	struct be_dma_mem rx_filter;	/* Cmd DMA mem for rx-filter */

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;
	u16 work_counter;

	struct delayed_work func_recovery_work;
	u32 flags;
	u32 cmd_privileges;
	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
	char fw_on_flash[FW_VER_LEN];
	int if_handle;		/* Used to configure filtering */
	u32 *pmac_id;		/* MAC addr handle used by BE card */
	u32 beacon_state;	/* for set_phys_id */

	bool eeh_error;
	bool fw_timeout;
	bool hw_error;

	u32 port_num;
	bool promiscuous;
	u8 mc_type;
	u32 function_mode;
	u32 function_caps;
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	bool stats_cmd_sent;
	struct {
		u32 size;
		u32 total_size;
		u64 io_addr;
	} roce_db;
	u32 num_msix_roce_vec;
	struct ocrdma_dev *ocrdma_dev;
	struct list_head entry;

	u32 flash_status;
	struct completion et_cmd_compl;

	struct be_resources res;	/* resources available for the func */
	u16 num_vfs;		/* Number of VFs provisioned by PF */
	u8 virtfn;
	struct be_vf_cfg *vf_cfg;
	bool be3_native;
	u32 sli_family;
	u8 hba_port_num;
	u16 pvid;
	struct phy_info phy;
	u8 wol_cap;
	bool wol_en;
	u32 uc_macs;		/* Count of secondary UC MAC programmed */
	u16 asic_rev;
	u16 qnq_vid;
	u32 msg_enable;
	int be_get_temp_freq;
	u8 pf_number;
	u64 rss_flags;
};

#define be_physfn(adapter)		(!adapter->virtfn)
#define be_virtfn(adapter)		(adapter->virtfn)
#define sriov_enabled(adapter)		(adapter->num_vfs > 0)
#define sriov_want(adapter)		(be_physfn(adapter) &&	\
					 (num_vfs || pci_num_vf(adapter->pdev)))
#define for_all_vfs(adapter, vf_cfg, i)					\
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
		i++, vf_cfg++)

#define ON				1
#define OFF				0

#define be_max_vlans(adapter)		(adapter->res.max_vlans)
#define be_max_uc(adapter)		(adapter->res.max_uc_mac)
#define be_max_mc(adapter)		(adapter->res.max_mcast_mac)
#define be_max_vfs(adapter)		(adapter->res.max_vfs)
#define be_max_rss(adapter)		(adapter->res.max_rss_qs)
#define be_max_txqs(adapter)		(adapter->res.max_tx_qs)
#define be_max_prio_txqs(adapter)	(adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter)		(adapter->res.max_rx_qs)
#define be_max_eqs(adapter)		(adapter->res.max_evt_qs)
#define be_if_cap_flags(adapter)	(adapter->res.if_cap_flags)

static inline u16 be_max_qs(struct be_adapter *adapter)
{
	/* If no RSS, need at least the one default RXQ */
	u16 num = max_t(u16, be_max_rss(adapter), 1);

	num = min(num, be_max_eqs(adapter));
	return min_t(u16, num, num_online_cpus());
}

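/* For example, on a function reporting max_rss_qs = 8 and max_evt_qs = 16,
 * running on a host with 4 online CPUs, be_max_qs() evaluates
 * min(min(max(8, 1), 16), 4) = 4; the queue count is ultimately capped by
 * the number of online CPUs.
 */
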
/* Is BE in pvid_tagging mode */
#define be_pvid_tagging_enabled(adapter)	(adapter->pvid)

/* Is BE in QNQ multi-channel mode */
#define be_is_qnq_mode(adapter)		(adapter->mc_type == FLEX10 ||	\
					 adapter->mc_type == vNIC1 ||	\
					 adapter->mc_type == UFP)

#define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \
				 adapter->pdev->device == OC_DEVICE_ID4)

#define skyhawk_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID5 || \
				 adapter->pdev->device == OC_DEVICE_ID6)

#define BE3_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID2 || \
				 adapter->pdev->device == OC_DEVICE_ID2)

#define BE2_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID1 || \
				 adapter->pdev->device == OC_DEVICE_ID1)

#define BEx_chip(adapter)	(BE3_chip(adapter) || BE2_chip(adapter))

#define be_roce_supported(adapter)	(skyhawk_chip(adapter) && \
					 (adapter->function_mode & RDMA_ENABLED))

extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
#define num_irqs(adapter)		(msix_enabled(adapter) ?	\
					 adapter->num_msix_vec : 1)
#define tx_stats(txo)			(&(txo)->stats)
#define rx_stats(rxo)			(&(rxo)->stats)

/* The default RXQ is the last RXQ */
#define default_rxo(adpt)		(&adpt->rx_obj[adpt->num_rx_qs - 1])

#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* Skip the default non-rss queue (last one) */
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)				\
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
		i++, txo++)

#define for_all_evt_queues(adapter, eqo, i)				\
	for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
		i++, eqo++)

#define for_all_rx_queues_on_eq(adapter, eqo, rxo, i)			\
	for (i = eqo->idx, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;\
		i += adapter->num_evt_qs, rxo += adapter->num_evt_qs)

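/* For example, with num_rx_qs = 8 and num_evt_qs = 4, the RXQs are spread
 * round-robin across the EQs: for_all_rx_queues_on_eq() on the EQ with
 * idx == 1 visits rx_obj[1] and rx_obj[5], and the EQ with idx == 3 visits
 * rx_obj[3] and rx_obj[7].
 */
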
#define is_mcc_eqo(eqo)			(eqo->idx == 0)
#define mcc_eqo(adapter)		(&adapter->eq_obj[0])

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

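/* For example, a 6000-byte buffer starting at offset 0xF00 within a 4K page
 * spans PAGES_4K_SPANNED(0xF00, 6000) = (0xF00 + 6000 + 4095) >> 12 = 3
 * pages, while the same buffer starting on a 4K boundary spans only 2.
 */
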
/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)					\
		(((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))

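/* The "_struct" argument to these macros is one of the amap_* layouts from
 * be_hw.h, where each field is declared as one u8 per bit (a 5-bit field is
 * a u8 array of 5). That is why offsetof()/32 selects the DWORD,
 * AMAP_BIT_OFFSET() gives the bit position within it, and sizeof(field)
 * feeds amap_mask() with the field's width in bits. A caller typically
 * looks roughly like the following (field names shown are the TX WRB header
 * layout used elsewhere in the driver):
 *
 *	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
 *	num_wrbs = AMAP_GET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr);
 */
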
#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif /* __BIG_ENDIAN */
}

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}

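/* The generated VF MAC keeps the PF's OUI (the memcpy of the first 3 bytes)
 * and takes its low 3 bytes from the low 24 bits of the jhash of the PF MAC.
 * For example, if the PF MAC were 00:00:c9:a1:b2:c3 and jhash() happened to
 * return 0x00123456, the VF would get 00:00:c9:12:34:56.
 */
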
static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
	return adapter->num_rx_qs > 1;
}

static inline bool be_error(struct be_adapter *adapter)
{
	return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
}

static inline bool be_hw_error(struct be_adapter *adapter)
{
	return adapter->eeh_error || adapter->hw_error;
}

static inline void be_clear_all_error(struct be_adapter *adapter)
{
	adapter->eeh_error = false;
	adapter->hw_error = false;
	adapter->fw_timeout = false;
}

static inline bool be_is_wol_excluded(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (!be_physfn(adapter))
		return true;

	switch (pdev->subsystem_device) {
	case OC_SUBSYS_DEVICE_ID1:
	case OC_SUBSYS_DEVICE_ID2:
	case OC_SUBSYS_DEVICE_ID3:
	case OC_SUBSYS_DEVICE_ID4:
		return true;
	default:
		return false;
	}
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire the napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		  u16 num_popped);
void be_link_status_update(struct be_adapter *adapter, u8 link_status);
void be_parse_stats(struct be_adapter *adapter);
int be_load_fw(struct be_adapter *adapter, u8 *func);
bool be_is_wol_supported(struct be_adapter *adapter);
bool be_pause_supported(struct be_adapter *adapter);
u32 be_get_fw_log_level(struct be_adapter *adapter);

static inline int fw_major_num(const char *fw_ver)
{
	int fw_major = 0;

	sscanf(fw_ver, "%d.", &fw_major);

	return fw_major;
}

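/* For example, with adapter->fw_ver holding "10.2u" (the same format as
 * DRV_VER above), fw_major_num() parses the leading integer and returns 10.
 */
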
int be_update_queues(struct be_adapter *adapter);
int be_poll(struct napi_struct *napi, int budget);

/*
 * internal function to initialize-cleanup roce device.
 */
void be_roce_dev_add(struct be_adapter *);
void be_roce_dev_remove(struct be_adapter *);

/*
 * internal function to open-close roce device during ifup-ifdown.
 */
void be_roce_dev_open(struct be_adapter *);
void be_roce_dev_close(struct be_adapter *);

#endif /* BE_H */