net/mlx4_core: Port aggregation upper layer interface
drivers/net/ethernet/mellanox/mlx4/mlx4.h
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX4_H
#define MLX4_H

#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>

#define DRV_NAME	"mlx4_core"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.2-1"
#define DRV_RELDATE	"Feb, 2014"

#define MLX4_FS_UDP_UC_EN		(1 << 1)
#define MLX4_FS_TCP_UC_EN		(1 << 2)
#define MLX4_FS_NUM_OF_L2_ADDR		8
#define MLX4_FS_MGM_LOG_ENTRY_SIZE	7
#define MLX4_FS_NUM_MCG			(1 << 17)

#define INIT_HCA_TPT_MW_ENABLE		(1 << 7)

struct mlx4_set_port_prio2tc_context {
	u8 prio2tc[4];
};

struct mlx4_port_scheduler_tc_cfg_be {
	__be16 pg;
	__be16 bw_precentage;
	__be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
	__be16 max_bw_value;
};

struct mlx4_set_port_scheduler_context {
	struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
};

enum {
	MLX4_HCR_BASE		= 0x80680,
	MLX4_HCR_SIZE		= 0x0001c,
	MLX4_CLR_INT_SIZE	= 0x00008,
	MLX4_SLAVE_COMM_BASE	= 0x0,
	MLX4_COMM_PAGESIZE	= 0x1000,
	MLX4_CLOCK_SIZE		= 0x00008,
	MLX4_COMM_CHAN_CAPS	= 0x8,
	MLX4_COMM_CHAN_FLAGS	= 0xc
};

enum {
	MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10,
	MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7,
	MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12,
	MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2),
	MLX4_MTT_ENTRY_PER_SEG	= 8,
};

enum {
	MLX4_NUM_PDS		= 1 << 15
};

enum {
	MLX4_CMPT_TYPE_QP	= 0,
	MLX4_CMPT_TYPE_SRQ	= 1,
	MLX4_CMPT_TYPE_CQ	= 2,
	MLX4_CMPT_TYPE_EQ	= 3,
	MLX4_CMPT_NUM_TYPE
};

enum {
	MLX4_CMPT_SHIFT		= 24,
	MLX4_NUM_CMPTS		= MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};

enum mlx4_mpt_state {
	MLX4_MPT_DISABLED = 0,
	MLX4_MPT_EN_HW,
	MLX4_MPT_EN_SW
};

#define MLX4_COMM_TIME			10000
#define MLX4_COMM_OFFLINE_TIME_OUT	30000
#define MLX4_COMM_CMD_NA_OP		0x0

enum {
	MLX4_COMM_CMD_RESET,
	MLX4_COMM_CMD_VHCR0,
	MLX4_COMM_CMD_VHCR1,
	MLX4_COMM_CMD_VHCR2,
	MLX4_COMM_CMD_VHCR_EN,
	MLX4_COMM_CMD_VHCR_POST,
	MLX4_COMM_CMD_FLR = 254
};

enum {
	MLX4_VF_SMI_DISABLED,
	MLX4_VF_SMI_ENABLED
};

/* The flag indicates that the slave should delay the RESET cmd */
#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
/* Indicates how many retries will be done if we are in the middle of FLR */
#define NUM_OF_RESET_RETRIES	10
#define SLEEP_TIME_IN_RESET	(2 * 1000)
enum mlx4_resource {
	RES_QP,
	RES_CQ,
	RES_SRQ,
	RES_XRCD,
	RES_MPT,
	RES_MTT,
	RES_MAC,
	RES_VLAN,
	RES_EQ,
	RES_COUNTER,
	RES_FS_RULE,
	MLX4_NUM_OF_RESOURCE_TYPE
};

enum mlx4_alloc_mode {
	RES_OP_RESERVE,
	RES_OP_RESERVE_AND_MAP,
	RES_OP_MAP_ICM,
};

enum mlx4_res_tracker_free_type {
	RES_TR_FREE_ALL,
	RES_TR_FREE_SLAVES_ONLY,
	RES_TR_FREE_STRUCTS_ONLY,
};

/*
 * Virtual HCR structures.
 * mlx4_vhcr is the sw representation, in machine endianness
 *
 * mlx4_vhcr_cmd is the formalized structure, the one that is passed
 * to FW to go through the communication channel.
 * It is big endian, and has the same structure as the physical HCR
 * used by the command interface.
 */
struct mlx4_vhcr {
	u64 in_param;
	u64 out_param;
	u32 in_modifier;
	u32 errno;
	u16 op;
	u16 token;
	u8 op_modifier;
	u8 e_bit;
};

struct mlx4_vhcr_cmd {
	__be64 in_param;
	__be32 in_modifier;
	u32 reserved1;
	__be64 out_param;
	__be16 token;
	u16 reserved;
	u8 status;
	u8 flags;
	__be16 opcode;
};

struct mlx4_cmd_info {
	u16 opcode;
	bool has_inbox;
	bool has_outbox;
	bool out_is_imm;
	bool encode_slave_id;
	int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
		      struct mlx4_cmd_mailbox *inbox);
	int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
		       struct mlx4_cmd_mailbox *inbox,
		       struct mlx4_cmd_mailbox *outbox,
		       struct mlx4_cmd_info *cmd);
};

#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
#define mlx4_debug_level	(0)
#endif /* CONFIG_MLX4_DEBUG */

#define mlx4_dbg(mdev, format, ...)					\
do {									\
	if (mlx4_debug_level)						\
		dev_printk(KERN_DEBUG,					\
			   &(mdev)->persist->pdev->dev, format,		\
			   ##__VA_ARGS__);				\
} while (0)

#define mlx4_err(mdev, format, ...)					\
	dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_info(mdev, format, ...)					\
	dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
#define mlx4_warn(mdev, format, ...)					\
	dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
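
/*
 * Illustrative use of the logging macros above (a sketch, not code taken
 * from the driver; "mdev" stands for whatever struct mlx4_dev pointer is
 * available at the call site, and the messages are made up):
 *
 *	mlx4_dbg(mdev, "command 0x%x completed with status %d\n", op, status);
 *	mlx4_warn(mdev, "communication channel not idle, retrying\n");
 *	mlx4_err(mdev, "failed to allocate EQ context memory\n");
 */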

extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
extern int mlx4_internal_err_reset;

#define MLX4_MAX_NUM_SLAVES	(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
#define ALL_SLAVES		0xff

struct mlx4_bitmap {
	u32 last;
	u32 top;
	u32 max;
	u32 reserved_top;
	u32 mask;
	u32 avail;
	u32 effective_len;
	spinlock_t lock;
	unsigned long *table;
};

struct mlx4_buddy {
	unsigned long **bits;
	unsigned int *num_free;
	u32 max_order;
	spinlock_t lock;
};

struct mlx4_icm;

struct mlx4_icm_table {
	u64 virt;
	int num_icm;
	u32 num_obj;
	int obj_size;
	int lowmem;
	int coherent;
	struct mutex mutex;
	struct mlx4_icm **icm;
};

#define MLX4_MPT_FLAG_SW_OWNS		(0xfUL << 28)
#define MLX4_MPT_FLAG_FREE		(0x3UL << 28)
#define MLX4_MPT_FLAG_MIO		(1 << 17)
#define MLX4_MPT_FLAG_BIND_ENABLE	(1 << 15)
#define MLX4_MPT_FLAG_PHYSICAL		(1 << 9)
#define MLX4_MPT_FLAG_REGION		(1 << 8)

#define MLX4_MPT_PD_MASK		(0x1FFFFUL)
#define MLX4_MPT_PD_VF_MASK		(0xFE0000UL)
#define MLX4_MPT_PD_FLAG_FAST_REG	(1 << 27)
#define MLX4_MPT_PD_FLAG_RAE		(1 << 28)
#define MLX4_MPT_PD_FLAG_EN_INV		(3 << 24)

#define MLX4_MPT_QP_FLAG_BOUND_QP	(1 << 7)

#define MLX4_MPT_STATUS_SW		0xF0
#define MLX4_MPT_STATUS_HW		0x00

#define MLX4_CQE_SIZE_MASK_STRIDE	0x3
#define MLX4_EQE_SIZE_MASK_STRIDE	0x30

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mlx4_mpt_entry {
	__be32 flags;
	__be32 qpn;
	__be32 key;
	__be32 pd_flags;
	__be64 start;
	__be64 length;
	__be32 lkey;
	__be32 win_cnt;
	u8 reserved1[3];
	u8 mtt_rep;
	__be64 mtt_addr;
	__be32 mtt_sz;
	__be32 entity_size;
	__be32 first_byte_offset;
} __packed;

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32 flags;
	u16 reserved1[3];
	__be16 page_offset;
	u8 log_eq_size;
	u8 reserved2[4];
	u8 eq_period;
	u8 reserved3;
	u8 eq_max_count;
	u8 reserved4[3];
	u8 intr;
	u8 log_page_size;
	u8 reserved5[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	u32 reserved6[2];
	__be32 consumer_index;
	__be32 producer_index;
	u32 reserved7[4];
};

struct mlx4_cq_context {
	__be32 flags;
	u16 reserved1[3];
	__be16 page_offset;
	__be32 logsize_usrpage;
	__be16 cq_period;
	__be16 cq_max_count;
	u8 reserved2[3];
	u8 comp_eqn;
	u8 log_page_size;
	u8 reserved3[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	u32 reserved4[2];
	__be64 db_rec_addr;
};

struct mlx4_srq_context {
	__be32 state_logsize_srqn;
	u8 logstride;
	u8 reserved1;
	__be16 xrcd;
	__be32 pg_offset_cqn;
	u32 reserved2;
	u8 log_page_size;
	u8 reserved3[2];
	u8 mtt_base_addr_h;
	__be32 mtt_base_addr_l;
	__be32 pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16 reserved4;
	__be16 wqe_counter;
	u32 reserved5;
	__be64 db_rec_addr;
};

struct mlx4_eq_tasklet {
	struct list_head list;
	struct list_head process_list;
	struct tasklet_struct task;
	/* lock on completion tasklet list */
	spinlock_t lock;
};

struct mlx4_eq {
	struct mlx4_dev *dev;
	void __iomem *doorbell;
	int eqn;
	u32 cons_index;
	u16 irq;
	u16 have_irq;
	int nent;
	struct mlx4_buf_list *page_list;
	struct mlx4_mtt mtt;
	struct mlx4_eq_tasklet tasklet_ctx;
};

struct mlx4_slave_eqe {
	u8 type;
	u8 port;
	u32 param;
};

struct mlx4_slave_event_eq_info {
	int eqn;
	u16 token;
};

struct mlx4_profile {
	int num_qp;
	int rdmarc_per_qp;
	int num_srq;
	int num_cq;
	int num_mcg;
	int num_mpt;
	unsigned num_mtt;
};

struct mlx4_fw {
	u64 clr_int_base;
	u64 catas_offset;
	u64 comm_base;
	u64 clock_offset;
	struct mlx4_icm *fw_icm;
	struct mlx4_icm *aux_icm;
	u32 catas_size;
	u16 fw_pages;
	u8 clr_int_bar;
	u8 catas_bar;
	u8 comm_bar;
	u8 clock_bar;
};

struct mlx4_comm {
	u32 slave_write;
	u32 slave_read;
};

enum {
	MLX4_MCAST_CONFIG	= 0,
	MLX4_MCAST_DISABLE	= 1,
	MLX4_MCAST_ENABLE	= 2,
};

#define VLAN_FLTR_SIZE	128

struct mlx4_vlan_fltr {
	__be32 entry[VLAN_FLTR_SIZE];
};

struct mlx4_mcast_entry {
	struct list_head list;
	u64 addr;
};

struct mlx4_promisc_qp {
	struct list_head list;
	u32 qpn;
};

struct mlx4_steer_index {
	struct list_head list;
	unsigned int index;
	struct list_head duplicates;
};

#define MLX4_EVENT_TYPES_NUM	64

struct mlx4_slave_state {
	u8 comm_toggle;
	u8 last_cmd;
	u8 init_port_mask;
	bool active;
	bool old_vlan_api;
	u8 function;
	dma_addr_t vhcr_dma;
	u16 mtu[MLX4_MAX_PORTS + 1];
	__be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
	struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
	struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
	struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
	/* event type to eq number lookup */
	struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM];
	u16 eq_pi;
	u16 eq_ci;
	spinlock_t lock;
	/* initialized via the kzalloc */
	u8 is_slave_going_down;
	u32 cookie;
	enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
};

#define MLX4_VGT	4095
#define NO_INDX		(-1)

struct mlx4_vport_state {
	u64 mac;
	u16 default_vlan;
	u8 default_qos;
	u32 tx_rate;
	bool spoofchk;
	u32 link_state;
};

struct mlx4_vf_admin_state {
	struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
	u8 enable_smi[MLX4_MAX_PORTS + 1];
};

struct mlx4_vport_oper_state {
	struct mlx4_vport_state state;
	int mac_idx;
	int vlan_idx;
};

struct mlx4_vf_oper_state {
	struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
	u8 smi_enabled[MLX4_MAX_PORTS + 1];
};

struct slave_list {
	struct mutex mutex;
	struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
};

struct resource_allocator {
	spinlock_t alloc_lock; /* protect quotas */
	union {
		int res_reserved;
		int res_port_rsvd[MLX4_MAX_PORTS];
	};
	union {
		int res_free;
		int res_port_free[MLX4_MAX_PORTS];
	};
	int *quota;
	int *allocated;
	int *guaranteed;
};

struct mlx4_resource_tracker {
	spinlock_t lock;
	/* tree for each resource */
	struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
	/* num_of_slave's lists, one per slave */
	struct slave_list *slave_list;
	struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE];
};

#define SLAVE_EVENT_EQ_SIZE	128
struct mlx4_slave_event_eq {
	u32 eqn;
	u32 cons;
	u32 prod;
	spinlock_t event_lock;
	struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
};

struct mlx4_master_qp0_state {
	int proxy_qp0_active;
	int qp0_active;
	int port_active;
};

struct mlx4_mfunc_master_ctx {
	struct mlx4_slave_state *slave_state;
	struct mlx4_vf_admin_state *vf_admin;
	struct mlx4_vf_oper_state *vf_oper;
	struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
	int init_port_ref[MLX4_MAX_PORTS + 1];
	u16 max_mtu[MLX4_MAX_PORTS + 1];
	int disable_mcast_ref[MLX4_MAX_PORTS + 1];
	struct mlx4_resource_tracker res_tracker;
	struct workqueue_struct *comm_wq;
	struct work_struct comm_work;
	struct work_struct slave_event_work;
	struct work_struct slave_flr_event_work;
	spinlock_t slave_state_lock;
	__be32 comm_arm_bit_vector[4];
	struct mlx4_eqe cmd_eqe;
	struct mlx4_slave_event_eq slave_eq;
	struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
};

struct mlx4_mfunc {
	struct mlx4_comm __iomem *comm;
	struct mlx4_vhcr_cmd *vhcr;
	dma_addr_t vhcr_dma;

	struct mlx4_mfunc_master_ctx master;
};

#define MGM_QPN_MASK		0x00FFFFFF
#define MGM_BLCK_LB_BIT		30

struct mlx4_mgm {
	__be32 next_gid_index;
	__be32 members_count;
	u32 reserved[2];
	u8 gid[16];
	__be32 qp[MLX4_MAX_QP_PER_MGM];
};

struct mlx4_cmd {
	struct pci_pool *pool;
	void __iomem *hcr;
	struct mutex slave_cmd_mutex;
	struct semaphore poll_sem;
	struct semaphore event_sem;
	int max_cmds;
	spinlock_t context_lock;
	int free_head;
	struct mlx4_cmd_context *context;
	u16 token_mask;
	u8 use_events;
	u8 toggle;
	u8 comm_toggle;
	u8 initialized;
};

enum {
	MLX4_VF_IMMED_VLAN_FLAG_VLAN		= 1 << 0,
	MLX4_VF_IMMED_VLAN_FLAG_QOS		= 1 << 1,
	MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE	= 1 << 2,
};
struct mlx4_vf_immed_vlan_work {
	struct work_struct work;
	struct mlx4_priv *priv;
	int flags;
	int slave;
	int vlan_ix;
	int orig_vlan_ix;
	u8 port;
	u8 qos;
	u16 vlan_id;
	u16 orig_vlan_id;
};

struct mlx4_uar_table {
	struct mlx4_bitmap bitmap;
};

struct mlx4_mr_table {
	struct mlx4_bitmap mpt_bitmap;
	struct mlx4_buddy mtt_buddy;
	u64 mtt_base;
	u64 mpt_base;
	struct mlx4_icm_table mtt_table;
	struct mlx4_icm_table dmpt_table;
};

struct mlx4_cq_table {
	struct mlx4_bitmap bitmap;
	spinlock_t lock;
	struct radix_tree_root tree;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
};

struct mlx4_eq_table {
	struct mlx4_bitmap bitmap;
	char *irq_names;
	void __iomem *clr_int;
	void __iomem **uar_map;
	u32 clr_mask;
	struct mlx4_eq *eq;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
	int have_irq;
	u8 inta_pin;
};

struct mlx4_srq_table {
	struct mlx4_bitmap bitmap;
	spinlock_t lock;
	struct radix_tree_root tree;
	struct mlx4_icm_table table;
	struct mlx4_icm_table cmpt_table;
};

enum mlx4_qp_table_zones {
	MLX4_QP_TABLE_ZONE_GENERAL,
	MLX4_QP_TABLE_ZONE_RSS,
	MLX4_QP_TABLE_ZONE_RAW_ETH,
	MLX4_QP_TABLE_ZONE_NUM
};

struct mlx4_qp_table {
	struct mlx4_bitmap *bitmap_gen;
	struct mlx4_zone_allocator *zones;
	u32 zones_uids[MLX4_QP_TABLE_ZONE_NUM];
	u32 rdmarc_base;
	int rdmarc_shift;
	spinlock_t lock;
	struct mlx4_icm_table qp_table;
	struct mlx4_icm_table auxc_table;
	struct mlx4_icm_table altc_table;
	struct mlx4_icm_table rdmarc_table;
	struct mlx4_icm_table cmpt_table;
};

struct mlx4_mcg_table {
	struct mutex mutex;
	struct mlx4_bitmap bitmap;
	struct mlx4_icm_table table;
};

struct mlx4_catas_err {
	u32 __iomem *map;
	struct timer_list timer;
	struct list_head list;
};

#define MLX4_MAX_MAC_NUM	128
#define MLX4_MAC_TABLE_SIZE	(MLX4_MAX_MAC_NUM << 3)

struct mlx4_mac_table {
	__be64 entries[MLX4_MAX_MAC_NUM];
	int refs[MLX4_MAX_MAC_NUM];
	struct mutex mutex;
	int total;
	int max;
};

#define MLX4_ROCE_GID_ENTRY_SIZE	16

struct mlx4_roce_gid_entry {
	u8 raw[MLX4_ROCE_GID_ENTRY_SIZE];
};

struct mlx4_roce_gid_table {
	struct mlx4_roce_gid_entry roce_gids[MLX4_ROCE_MAX_GIDS];
	struct mutex mutex;
};

#define MLX4_MAX_VLAN_NUM	128
#define MLX4_VLAN_TABLE_SIZE	(MLX4_MAX_VLAN_NUM << 2)

struct mlx4_vlan_table {
	__be32 entries[MLX4_MAX_VLAN_NUM];
	int refs[MLX4_MAX_VLAN_NUM];
	struct mutex mutex;
	int total;
	int max;
};

#define SET_PORT_GEN_ALL_VALID		0x7
#define SET_PORT_PROMISC_SHIFT		31
#define SET_PORT_MC_PROMISC_SHIFT	30

enum {
	MCAST_DIRECT_ONLY	= 0,
	MCAST_DIRECT		= 1,
	MCAST_DEFAULT		= 2
};

struct mlx4_set_port_general_context {
	u8 reserved[3];
	u8 flags;
	u16 reserved2;
	__be16 mtu;
	u8 pptx;
	u8 pfctx;
	u16 reserved3;
	u8 pprx;
	u8 pfcrx;
	u16 reserved4;
};

struct mlx4_set_port_rqp_calc_context {
	__be32 base_qpn;
	u8 rererved;
	u8 n_mac;
	u8 n_vlan;
	u8 n_prio;
	u8 reserved2[3];
	u8 mac_miss;
	u8 intra_no_vlan;
	u8 no_vlan;
	u8 intra_vlan_miss;
	u8 vlan_miss;
	u8 reserved3[3];
	u8 no_vlan_prio;
	__be32 promisc;
	__be32 mcast;
};

struct mlx4_port_info {
	struct mlx4_dev *dev;
	int port;
	char dev_name[16];
	struct device_attribute port_attr;
	enum mlx4_port_type tmp_type;
	char dev_mtu_name[16];
	struct device_attribute port_mtu_attr;
	struct mlx4_mac_table mac_table;
	struct mlx4_vlan_table vlan_table;
	struct mlx4_roce_gid_table gid_table;
	int base_qpn;
};

struct mlx4_sense {
	struct mlx4_dev *dev;
	u8 do_sense_port[MLX4_MAX_PORTS + 1];
	u8 sense_allowed[MLX4_MAX_PORTS + 1];
	struct delayed_work sense_poll;
};

struct mlx4_msix_ctl {
	u64 pool_bm;
	struct mutex pool_lock;
};

struct mlx4_steer {
	struct list_head promisc_qps[MLX4_NUM_STEERS];
	struct list_head steer_entries[MLX4_NUM_STEERS];
};

enum {
	MLX4_PCI_DEV_IS_VF		= 1 << 0,
	MLX4_PCI_DEV_FORCE_SENSE_PORT	= 1 << 1,
};

enum {
	MLX4_NO_RR	= 0,
	MLX4_USE_RR	= 1,
};

struct mlx4_priv {
	struct mlx4_dev dev;

	struct list_head dev_list;
	struct list_head ctx_list;
	spinlock_t ctx_lock;

	int pci_dev_data;
	int removed;

	struct list_head pgdir_list;
	struct mutex pgdir_mutex;

	struct mlx4_fw fw;
	struct mlx4_cmd cmd;
	struct mlx4_mfunc mfunc;

	struct mlx4_bitmap pd_bitmap;
	struct mlx4_bitmap xrcd_bitmap;
	struct mlx4_uar_table uar_table;
	struct mlx4_mr_table mr_table;
	struct mlx4_cq_table cq_table;
	struct mlx4_eq_table eq_table;
	struct mlx4_srq_table srq_table;
	struct mlx4_qp_table qp_table;
	struct mlx4_mcg_table mcg_table;
	struct mlx4_bitmap counters_bitmap;

	struct mlx4_catas_err catas_err;

	void __iomem *clr_base;

	struct mlx4_uar driver_uar;
	void __iomem *kar;
	struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
	struct mlx4_sense sense;
	struct mutex port_mutex;
	struct mlx4_msix_ctl msix_ctl;
	struct mlx4_steer *steer;
	struct list_head bf_list;
	struct mutex bf_mutex;
	struct io_mapping *bf_mapping;
	void __iomem *clock_mapping;
	int reserved_mtts;
	int fs_hash_mode;
	u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	struct mlx4_port_map v2p; /* cached port mapping configuration */
	struct mutex bond_mutex; /* for bond mode */
	__be64 slave_node_guids[MLX4_MFUNC_MAX];

	atomic_t opreq_count;
	struct work_struct opreq_task;
};

static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
{
	return container_of(dev, struct mlx4_priv, dev);
}

#define MLX4_SENSE_RANGE	(HZ * 3)

extern struct workqueue_struct *mlx4_wq;

u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
			    int align, u32 skip_mask);
void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr);
u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
		     u32 reserved_bot, u32 reserved_top);
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
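
/*
 * Illustrative mlx4_bitmap usage (a sketch, not code taken from the driver;
 * "num", "reserved_bot" and "reserved_top" are assumed caller-chosen values,
 * and the driver's callers typically pass a power-of-two "num" with
 * mask == num - 1):
 *
 *	struct mlx4_bitmap bitmap;
 *	u32 obj;
 *
 *	if (mlx4_bitmap_init(&bitmap, num, num - 1, reserved_bot, reserved_top))
 *		return -ENOMEM;
 *	obj = mlx4_bitmap_alloc(&bitmap);
 *	if (obj != (u32) -1)
 *		mlx4_bitmap_free(&bitmap, obj, MLX4_NO_RR);
 *	mlx4_bitmap_cleanup(&bitmap);
 */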

int mlx4_reset(struct mlx4_dev *dev);

int mlx4_alloc_eq_table(struct mlx4_dev *dev);
void mlx4_free_eq_table(struct mlx4_dev *dev);

int mlx4_init_pd_table(struct mlx4_dev *dev);
int mlx4_init_xrcd_table(struct mlx4_dev *dev);
int mlx4_init_uar_table(struct mlx4_dev *dev);
int mlx4_init_mr_table(struct mlx4_dev *dev);
int mlx4_init_eq_table(struct mlx4_dev *dev);
int mlx4_init_cq_table(struct mlx4_dev *dev);
int mlx4_init_qp_table(struct mlx4_dev *dev);
int mlx4_init_srq_table(struct mlx4_dev *dev);
int mlx4_init_mcg_table(struct mlx4_dev *dev);

void mlx4_cleanup_pd_table(struct mlx4_dev *dev);
void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev);
void mlx4_cleanup_uar_table(struct mlx4_dev *dev);
void mlx4_cleanup_mr_table(struct mlx4_dev *dev);
void mlx4_cleanup_eq_table(struct mlx4_dev *dev);
void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);

int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd);
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags);
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
int __mlx4_xrcd_alloc(struct mlx4_dev *dev, u32 *xrcdn);
void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn);

void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
int mlx4_catas_init(struct mlx4_dev *dev);
void mlx4_catas_end(struct mlx4_dev *dev);
int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 unsigned long param);

struct mlx4_dev_cap;
struct mlx4_init_hca_param;

u64 mlx4_make_profile(struct mlx4_dev *dev,
		      struct mlx4_profile *request,
		      struct mlx4_dev_cap *dev_cap,
		      struct mlx4_init_hca_param *init_hca);
void mlx4_master_comm_channel(struct work_struct *work);
void mlx4_gen_slave_eqe(struct work_struct *work);
void mlx4_master_handle_slave_flr(struct work_struct *work);

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd);
int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd);
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd);
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd);
int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd);
int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd);
int mlx4_2ERR_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_RTS2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_QUERY_QP_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);

int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);

enum {
	MLX4_CMD_CLEANUP_STRUCT	= 1UL << 0,
	MLX4_CMD_CLEANUP_POOL	= 1UL << 1,
	MLX4_CMD_CLEANUP_HCR	= 1UL << 2,
	MLX4_CMD_CLEANUP_VHCR	= 1UL << 3,
	MLX4_CMD_CLEANUP_ALL	= (MLX4_CMD_CLEANUP_VHCR << 1) - 1
};

int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask);
int mlx4_multi_func_init(struct mlx4_dev *dev);
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev);
void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);

int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  u16 op, unsigned long timeout);

void mlx4_cq_tasklet_cb(unsigned long data);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);

void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);

void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);

void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);

int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
		    enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
			 enum mlx4_port_type *stype,
			 enum mlx4_port_type *defaults);
void mlx4_start_sense(struct mlx4_dev *dev);
void mlx4_stop_sense(struct mlx4_dev *dev);
void mlx4_sense_init(struct mlx4_dev *dev);
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type);
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types);

void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
			      struct mlx4_roce_gid_table *table);
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);

int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
/* resource tracker functions */
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource resource_type,
				    u64 resource_id, int *slave);
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave);
int mlx4_init_resource_tracker(struct mlx4_dev *dev);

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type);

int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd);
int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);
int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);

int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len);

int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);

int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer);
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id);
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd);
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
			      int port, void *buf);
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
			       struct mlx4_cmd_mailbox *outbox);
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd);
int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd);
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd);
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd);
int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd);

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);

static inline void set_param_l(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
}

static inline void set_param_h(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff) | ((u64) val << 32);
}

static inline u32 get_param_l(u64 *arg)
{
	return (u32) (*arg & 0xffffffff);
}

static inline u32 get_param_h(u64 *arg)
{
	return (u32) (*arg >> 32);
}
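
/*
 * Illustrative use of the helpers above (a sketch; the variable names are
 * made up): packing and unpacking the two 32-bit halves of a 64-bit
 * command parameter.
 *
 *	u64 out_param = 0;
 *
 *	set_param_l(&out_param, base_qpn);	(low 32 bits)
 *	set_param_h(&out_param, count);		(high 32 bits)
 *	base_qpn = get_param_l(&out_param);
 *	count = get_param_h(&out_param);
 */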

static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
{
	return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
}

#define NOT_MASKED_PD_BITS 17

void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);

void mlx4_init_quotas(struct mlx4_dev *dev);

int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
/* Returns the VF index of slave */
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
int mlx4_config_mad_demux(struct mlx4_dev *dev);
int mlx4_do_bond(struct mlx4_dev *dev, bool enable);

enum mlx4_zone_flags {
	MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO	= 1UL << 0,
	MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO	= 1UL << 1,
	MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO	= 1UL << 2,
	MLX4_ZONE_USE_RR			= 1UL << 3,
};

enum mlx4_zone_alloc_flags {
	/* No two objects could overlap between zones. UID
	 * could be left unused. If this flag is given and
	 * two overlapped zones are used, an object will be free'd
	 * from the smallest possible matching zone.
	 */
	MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP = 1UL << 0,
};

struct mlx4_zone_allocator;

/* Create a new zone allocator */
struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags);

/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator
 * <zone_alloc>. Allocating an object from this zone adds an offset <offset>.
 * Similarly, when searching for an object to free, this offset is taken into
 * account. The use_rr mlx4_ib parameter for allocating objects from this <bitmap>
 * is given through the MLX4_ZONE_USE_RR flag in <flags>.
 * When an allocation fails, <zone_alloc> tries to allocate from other zones
 * according to the policy set by <flags>. <puid> is the unique identifier
 * received for this zone.
 */
int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
		      struct mlx4_bitmap *bitmap,
		      u32 flags,
		      int priority,
		      int offset,
		      u32 *puid);

/* Remove the bitmap indicated by <uid> from <zone_alloc> */
int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid);

/* Delete the zone allocator <zone_alloc>. This function doesn't destroy
 * the attached bitmaps.
 */
void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);

/* Allocate <count> objects with align <align> and skip_mask <skip_mask>
 * from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually
 * allocated from is returned in <puid>. If the allocation fails, a negative
 * number is returned. Otherwise, the offset of the first object is returned.
 */
u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
			    int align, u32 skip_mask, u32 *puid);

/* Free <count> objects, starting from <obj> of the uid <uid>, from zone_allocator
 * <zones>.
 */
u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
			   u32 uid, u32 obj, u32 count);

/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, then instead of
 * specifying the uid when freeing an object, the zone allocator can figure it out
 * by itself. Other parameters are similar to mlx4_zone_free.
 */
u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count);

/* Returns a pointer to the mlx4_bitmap that was attached to <zones> with <uid> */
struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid);

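/*
 * Illustrative zone allocator usage (a sketch, not code taken from the
 * driver; "bitmap" is assumed to have been set up with mlx4_bitmap_init,
 * and the priority/offset arguments are arbitrary):
 *
 *	struct mlx4_zone_allocator *zones;
 *	u32 uid, obj;
 *
 *	zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
 *	if (!zones)
 *		return -ENOMEM;
 *	if (!mlx4_zone_add_one(zones, &bitmap, MLX4_ZONE_USE_RR, 0, 0, &uid)) {
 *		obj = mlx4_zone_alloc_entries(zones, uid, 1, 1, 0, NULL);
 *		if (obj != (u32) -1)
 *			mlx4_zone_free_entries(zones, uid, obj, 1);
 *	}
 *	mlx4_zone_allocator_destroy(zones);
 */
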
#endif /* MLX4_H */