cxgb4/cxgb4vf: Add Devicde ID for two more adapter
[deliverable/linux.git] / drivers / net / ethernet / chelsio / cxgb4 / cxgb4_main.c
CommitLineData
b8ff05a9
DM
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
ce100b8b 4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
b8ff05a9
DM
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
01789349 44#include <linux/if.h>
b8ff05a9
DM
45#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
01bcca68 63#include <net/addrconf.h>
b8ff05a9
DM
64#include <asm/uaccess.h>
65
66#include "cxgb4.h"
67#include "t4_regs.h"
68#include "t4_msg.h"
69#include "t4fw_api.h"
688848b1 70#include "cxgb4_dcb.h"
b8ff05a9
DM
71#include "l2t.h"
72
01bcca68
VP
73#include <../drivers/net/bonding/bonding.h>
74
75#ifdef DRV_VERSION
76#undef DRV_VERSION
77#endif
3a7f8554
SR
78#define DRV_VERSION "2.0.0-ko"
79#define DRV_DESC "Chelsio T4/T5 Network Driver"
b8ff05a9
DM
80
81/*
82 * Max interrupt hold-off timer value in us. Queues fall back to this value
83 * under extreme memory pressure so it's largish to give the system time to
84 * recover.
85 */
86#define MAX_SGE_TIMERVAL 200U
87
7ee9ff94 88enum {
13ee15d3
VP
89 /*
90 * Physical Function provisioning constants.
91 */
92 PFRES_NVI = 4, /* # of Virtual Interfaces */
93 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
94 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
95 */
96 PFRES_NEQ = 256, /* # of egress queues */
97 PFRES_NIQ = 0, /* # of ingress queues */
98 PFRES_TC = 0, /* PCI-E traffic class */
99 PFRES_NEXACTF = 128, /* # of exact MPS filters */
100
101 PFRES_R_CAPS = FW_CMD_CAP_PF,
102 PFRES_WX_CAPS = FW_CMD_CAP_PF,
103
104#ifdef CONFIG_PCI_IOV
105 /*
106 * Virtual Function provisioning constants. We need two extra Ingress
107 * Queues with Interrupt capability to serve as the VF's Firmware
108 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109 * neither will have Free Lists associated with them). For each
110 * Ethernet/Control Egress Queue and for each Free List, we need an
111 * Egress Context.
112 */
7ee9ff94
CL
113 VFRES_NPORTS = 1, /* # of "ports" per VF */
114 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
115
116 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
117 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
118 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
7ee9ff94 119 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
13ee15d3 120 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
7ee9ff94
CL
121 VFRES_TC = 0, /* PCI-E traffic class */
122 VFRES_NEXACTF = 16, /* # of exact MPS filters */
123
124 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
13ee15d3 126#endif
7ee9ff94
CL
127};
128
129/*
130 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
131 * static and likely not to be useful in the long run. We really need to
132 * implement some form of persistent configuration which the firmware
133 * controls.
134 */
135static unsigned int pfvfres_pmask(struct adapter *adapter,
136 unsigned int pf, unsigned int vf)
137{
138 unsigned int portn, portvec;
139
140 /*
141 * Give PF's access to all of the ports.
142 */
143 if (vf == 0)
144 return FW_PFVF_CMD_PMASK_MASK;
145
146 /*
147 * For VFs, we'll assign them access to the ports based purely on the
148 * PF. We assign active ports in order, wrapping around if there are
149 * fewer active ports than PFs: e.g. active port[pf % nports].
150 * Unfortunately the adapter's port_info structs haven't been
151 * initialized yet so we have to compute this.
152 */
153 if (adapter->params.nports == 0)
154 return 0;
155
156 portn = pf % adapter->params.nports;
157 portvec = adapter->params.portvec;
158 for (;;) {
159 /*
160 * Isolate the lowest set bit in the port vector. If we're at
161 * the port number that we want, return that as the pmask.
162 * otherwise mask that bit out of the port vector and
163 * decrement our port number ...
164 */
165 unsigned int pmask = portvec ^ (portvec & (portvec-1));
166 if (portn == 0)
167 return pmask;
168 portn--;
169 portvec &= ~pmask;
170 }
171 /*NOTREACHED*/
172}
7ee9ff94 173
b8ff05a9
DM
174enum {
175 MAX_TXQ_ENTRIES = 16384,
176 MAX_CTRL_TXQ_ENTRIES = 1024,
177 MAX_RSPQ_ENTRIES = 16384,
178 MAX_RX_BUFFERS = 16384,
179 MIN_TXQ_ENTRIES = 32,
180 MIN_CTRL_TXQ_ENTRIES = 32,
181 MIN_RSPQ_ENTRIES = 128,
182 MIN_FL_ENTRIES = 16
183};
184
f2b7e78d
VP
185/* Host shadow copy of ingress filter entry. This is in host native format
186 * and doesn't match the ordering or bit order, etc. of the hardware of the
187 * firmware command. The use of bit-field structure elements is purely to
188 * remind ourselves of the field size limitations and save memory in the case
189 * where the filter table is large.
190 */
191struct filter_entry {
192 /* Administrative fields for filter.
193 */
194 u32 valid:1; /* filter allocated and valid */
195 u32 locked:1; /* filter is administratively locked */
196
197 u32 pending:1; /* filter action is pending firmware reply */
198 u32 smtidx:8; /* Source MAC Table index for smac */
199 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
200
201 /* The filter itself. Most of this is a straight copy of information
202 * provided by the extended ioctl(). Some fields are translated to
203 * internal forms -- for instance the Ingress Queue ID passed in from
204 * the ioctl() is translated into the Absolute Ingress Queue ID.
205 */
206 struct ch_filter_specification fs;
207};
208
b8ff05a9
DM
209#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
210 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
211 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
212
060e0c75 213#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
b8ff05a9 214
9baa3c34 215static const struct pci_device_id cxgb4_pci_tbl[] = {
060e0c75 216 CH_DEVICE(0xa000, 0), /* PE10K */
ccea790e
DM
217 CH_DEVICE(0x4001, -1),
218 CH_DEVICE(0x4002, -1),
219 CH_DEVICE(0x4003, -1),
220 CH_DEVICE(0x4004, -1),
221 CH_DEVICE(0x4005, -1),
222 CH_DEVICE(0x4006, -1),
223 CH_DEVICE(0x4007, -1),
224 CH_DEVICE(0x4008, -1),
225 CH_DEVICE(0x4009, -1),
226 CH_DEVICE(0x400a, -1),
fb1e933d
HS
227 CH_DEVICE(0x400d, -1),
228 CH_DEVICE(0x400e, -1),
229 CH_DEVICE(0x4080, -1),
230 CH_DEVICE(0x4081, -1),
231 CH_DEVICE(0x4082, -1),
232 CH_DEVICE(0x4083, -1),
233 CH_DEVICE(0x4084, -1),
234 CH_DEVICE(0x4085, -1),
235 CH_DEVICE(0x4086, -1),
236 CH_DEVICE(0x4087, -1),
237 CH_DEVICE(0x4088, -1),
ccea790e
DM
238 CH_DEVICE(0x4401, 4),
239 CH_DEVICE(0x4402, 4),
240 CH_DEVICE(0x4403, 4),
241 CH_DEVICE(0x4404, 4),
242 CH_DEVICE(0x4405, 4),
243 CH_DEVICE(0x4406, 4),
244 CH_DEVICE(0x4407, 4),
245 CH_DEVICE(0x4408, 4),
246 CH_DEVICE(0x4409, 4),
247 CH_DEVICE(0x440a, 4),
f637d577
VP
248 CH_DEVICE(0x440d, 4),
249 CH_DEVICE(0x440e, 4),
fb1e933d
HS
250 CH_DEVICE(0x4480, 4),
251 CH_DEVICE(0x4481, 4),
252 CH_DEVICE(0x4482, 4),
253 CH_DEVICE(0x4483, 4),
254 CH_DEVICE(0x4484, 4),
255 CH_DEVICE(0x4485, 4),
256 CH_DEVICE(0x4486, 4),
257 CH_DEVICE(0x4487, 4),
258 CH_DEVICE(0x4488, 4),
9ef603a0
VP
259 CH_DEVICE(0x5001, 4),
260 CH_DEVICE(0x5002, 4),
261 CH_DEVICE(0x5003, 4),
262 CH_DEVICE(0x5004, 4),
263 CH_DEVICE(0x5005, 4),
264 CH_DEVICE(0x5006, 4),
265 CH_DEVICE(0x5007, 4),
266 CH_DEVICE(0x5008, 4),
267 CH_DEVICE(0x5009, 4),
268 CH_DEVICE(0x500A, 4),
269 CH_DEVICE(0x500B, 4),
270 CH_DEVICE(0x500C, 4),
271 CH_DEVICE(0x500D, 4),
272 CH_DEVICE(0x500E, 4),
273 CH_DEVICE(0x500F, 4),
274 CH_DEVICE(0x5010, 4),
275 CH_DEVICE(0x5011, 4),
276 CH_DEVICE(0x5012, 4),
277 CH_DEVICE(0x5013, 4),
f0a8e6de
HS
278 CH_DEVICE(0x5014, 4),
279 CH_DEVICE(0x5015, 4),
0183aa62
HS
280 CH_DEVICE(0x5080, 4),
281 CH_DEVICE(0x5081, 4),
282 CH_DEVICE(0x5082, 4),
283 CH_DEVICE(0x5083, 4),
284 CH_DEVICE(0x5084, 4),
285 CH_DEVICE(0x5085, 4),
56e03e51 286 CH_DEVICE(0x5086, 4),
91c04a9e
HS
287 CH_DEVICE(0x5087, 4),
288 CH_DEVICE(0x5088, 4),
9ef603a0
VP
289 CH_DEVICE(0x5401, 4),
290 CH_DEVICE(0x5402, 4),
291 CH_DEVICE(0x5403, 4),
292 CH_DEVICE(0x5404, 4),
293 CH_DEVICE(0x5405, 4),
294 CH_DEVICE(0x5406, 4),
295 CH_DEVICE(0x5407, 4),
296 CH_DEVICE(0x5408, 4),
297 CH_DEVICE(0x5409, 4),
298 CH_DEVICE(0x540A, 4),
299 CH_DEVICE(0x540B, 4),
300 CH_DEVICE(0x540C, 4),
301 CH_DEVICE(0x540D, 4),
302 CH_DEVICE(0x540E, 4),
303 CH_DEVICE(0x540F, 4),
304 CH_DEVICE(0x5410, 4),
305 CH_DEVICE(0x5411, 4),
306 CH_DEVICE(0x5412, 4),
307 CH_DEVICE(0x5413, 4),
f0a8e6de
HS
308 CH_DEVICE(0x5414, 4),
309 CH_DEVICE(0x5415, 4),
0183aa62
HS
310 CH_DEVICE(0x5480, 4),
311 CH_DEVICE(0x5481, 4),
312 CH_DEVICE(0x5482, 4),
313 CH_DEVICE(0x5483, 4),
314 CH_DEVICE(0x5484, 4),
315 CH_DEVICE(0x5485, 4),
56e03e51 316 CH_DEVICE(0x5486, 4),
91c04a9e
HS
317 CH_DEVICE(0x5487, 4),
318 CH_DEVICE(0x5488, 4),
b8ff05a9
DM
319 { 0, }
320};
321
16e47624 322#define FW4_FNAME "cxgb4/t4fw.bin"
0a57a536 323#define FW5_FNAME "cxgb4/t5fw.bin"
16e47624 324#define FW4_CFNAME "cxgb4/t4-config.txt"
0a57a536 325#define FW5_CFNAME "cxgb4/t5-config.txt"
b8ff05a9
DM
326
327MODULE_DESCRIPTION(DRV_DESC);
328MODULE_AUTHOR("Chelsio Communications");
329MODULE_LICENSE("Dual BSD/GPL");
330MODULE_VERSION(DRV_VERSION);
331MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
16e47624 332MODULE_FIRMWARE(FW4_FNAME);
0a57a536 333MODULE_FIRMWARE(FW5_FNAME);
b8ff05a9 334
636f9d37
VP
335/*
336 * Normally we're willing to become the firmware's Master PF but will be happy
337 * if another PF has already become the Master and initialized the adapter.
338 * Setting "force_init" will cause this driver to forcibly establish itself as
339 * the Master PF and initialize the adapter.
340 */
341static uint force_init;
342
343module_param(force_init, uint, 0644);
344MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
345
13ee15d3
VP
346/*
347 * Normally if the firmware we connect to has Configuration File support, we
348 * use that and only fall back to the old Driver-based initialization if the
349 * Configuration File fails for some reason. If force_old_init is set, then
350 * we'll always use the old Driver-based initialization sequence.
351 */
352static uint force_old_init;
353
354module_param(force_old_init, uint, 0644);
355MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
356
b8ff05a9
DM
357static int dflt_msg_enable = DFLT_MSG_ENABLE;
358
359module_param(dflt_msg_enable, int, 0644);
360MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
361
362/*
363 * The driver uses the best interrupt scheme available on a platform in the
364 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
365 * of these schemes the driver may consider as follows:
366 *
367 * msi = 2: choose from among all three options
368 * msi = 1: only consider MSI and INTx interrupts
369 * msi = 0: force INTx interrupts
370 */
371static int msi = 2;
372
373module_param(msi, int, 0644);
374MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
375
376/*
377 * Queue interrupt hold-off timer values. Queues default to the first of these
378 * upon creation.
379 */
380static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
381
382module_param_array(intr_holdoff, uint, NULL, 0644);
383MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
384 "0..4 in microseconds");
385
386static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
387
388module_param_array(intr_cnt, uint, NULL, 0644);
389MODULE_PARM_DESC(intr_cnt,
390 "thresholds 1..3 for queue interrupt packet counters");
391
636f9d37
VP
392/*
393 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
394 * offset by 2 bytes in order to have the IP headers line up on 4-byte
395 * boundaries. This is a requirement for many architectures which will throw
396 * a machine check fault if an attempt is made to access one of the 4-byte IP
397 * header fields on a non-4-byte boundary. And it's a major performance issue
398 * even on some architectures which allow it like some implementations of the
399 * x86 ISA. However, some architectures don't mind this and for some very
400 * edge-case performance sensitive applications (like forwarding large volumes
401 * of small packets), setting this DMA offset to 0 will decrease the number of
402 * PCI-E Bus transfers enough to measurably affect performance.
403 */
404static int rx_dma_offset = 2;
405
eb939922 406static bool vf_acls;
b8ff05a9
DM
407
408#ifdef CONFIG_PCI_IOV
409module_param(vf_acls, bool, 0644);
410MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
411
7d6727cf
SR
412/* Configure the number of PCI-E Virtual Function which are to be instantiated
413 * on SR-IOV Capable Physical Functions.
0a57a536 414 */
7d6727cf 415static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
b8ff05a9
DM
416
417module_param_array(num_vf, uint, NULL, 0644);
7d6727cf 418MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
b8ff05a9
DM
419#endif
420
688848b1
AB
421/* TX Queue select used to determine what algorithm to use for selecting TX
422 * queue. Select between the kernel provided function (select_queue=0) or user
423 * cxgb_select_queue function (select_queue=1)
424 *
425 * Default: select_queue=0
426 */
427static int select_queue;
428module_param(select_queue, int, 0644);
429MODULE_PARM_DESC(select_queue,
430 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
431
13ee15d3
VP
432/*
433 * The filter TCAM has a fixed portion and a variable portion. The fixed
434 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
435 * ports. The variable portion is 36 bits which can include things like Exact
436 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
437 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
438 * far exceed the 36-bit budget for this "compressed" header portion of the
439 * filter. Thus, we have a scarce resource which must be carefully managed.
440 *
441 * By default we set this up to mostly match the set of filter matching
442 * capabilities of T3 but with accommodations for some of T4's more
443 * interesting features:
444 *
445 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
446 * [Inner] VLAN (17), Port (3), FCoE (1) }
447 */
448enum {
449 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
450 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
451 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
452};
453
454static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
455
f2b7e78d
VP
456module_param(tp_vlan_pri_map, uint, 0644);
457MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
458
b8ff05a9
DM
459static struct dentry *cxgb4_debugfs_root;
460
461static LIST_HEAD(adapter_list);
462static DEFINE_MUTEX(uld_mutex);
01bcca68
VP
463/* Adapter list to be accessed from atomic context */
464static LIST_HEAD(adap_rcu_list);
465static DEFINE_SPINLOCK(adap_rcu_lock);
b8ff05a9
DM
466static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
467static const char *uld_str[] = { "RDMA", "iSCSI" };
468
469static void link_report(struct net_device *dev)
470{
471 if (!netif_carrier_ok(dev))
472 netdev_info(dev, "link down\n");
473 else {
474 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
475
476 const char *s = "10Mbps";
477 const struct port_info *p = netdev_priv(dev);
478
479 switch (p->link_cfg.speed) {
e8b39015 480 case 10000:
b8ff05a9
DM
481 s = "10Gbps";
482 break;
e8b39015 483 case 1000:
b8ff05a9
DM
484 s = "1000Mbps";
485 break;
e8b39015 486 case 100:
b8ff05a9
DM
487 s = "100Mbps";
488 break;
e8b39015 489 case 40000:
72aca4bf
KS
490 s = "40Gbps";
491 break;
b8ff05a9
DM
492 }
493
494 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
495 fc[p->link_cfg.fc]);
496 }
497}
498
688848b1
AB
499#ifdef CONFIG_CHELSIO_T4_DCB
500/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
501static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
502{
503 struct port_info *pi = netdev_priv(dev);
504 struct adapter *adap = pi->adapter;
505 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
506 int i;
507
508 /* We use a simple mapping of Port TX Queue Index to DCB
509 * Priority when we're enabling DCB.
510 */
511 for (i = 0; i < pi->nqsets; i++, txq++) {
512 u32 name, value;
513 int err;
514
515 name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
516 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
517 FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
518 value = enable ? i : 0xffffffff;
519
520 /* Since we can be called while atomic (from "interrupt
521 * level") we need to issue the Set Parameters Commannd
522 * without sleeping (timeout < 0).
523 */
524 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
525 &name, &value);
526
527 if (err)
528 dev_err(adap->pdev_dev,
529 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
530 enable ? "set" : "unset", pi->port_id, i, -err);
10b00466
AB
531 else
532 txq->dcb_prio = value;
688848b1
AB
533 }
534}
535#endif /* CONFIG_CHELSIO_T4_DCB */
536
b8ff05a9
DM
537void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
538{
539 struct net_device *dev = adapter->port[port_id];
540
541 /* Skip changes from disabled ports. */
542 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
543 if (link_stat)
544 netif_carrier_on(dev);
688848b1
AB
545 else {
546#ifdef CONFIG_CHELSIO_T4_DCB
547 cxgb4_dcb_state_init(dev);
548 dcb_tx_queue_prio_enable(dev, false);
549#endif /* CONFIG_CHELSIO_T4_DCB */
b8ff05a9 550 netif_carrier_off(dev);
688848b1 551 }
b8ff05a9
DM
552
553 link_report(dev);
554 }
555}
556
557void t4_os_portmod_changed(const struct adapter *adap, int port_id)
558{
559 static const char *mod_str[] = {
a0881cab 560 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
b8ff05a9
DM
561 };
562
563 const struct net_device *dev = adap->port[port_id];
564 const struct port_info *pi = netdev_priv(dev);
565
566 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
567 netdev_info(dev, "port module unplugged\n");
a0881cab 568 else if (pi->mod_type < ARRAY_SIZE(mod_str))
b8ff05a9
DM
569 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
570}
571
572/*
573 * Configure the exact and hash address filters to handle a port's multicast
574 * and secondary unicast MAC addresses.
575 */
576static int set_addr_filters(const struct net_device *dev, bool sleep)
577{
578 u64 mhash = 0;
579 u64 uhash = 0;
580 bool free = true;
581 u16 filt_idx[7];
582 const u8 *addr[7];
583 int ret, naddr = 0;
b8ff05a9
DM
584 const struct netdev_hw_addr *ha;
585 int uc_cnt = netdev_uc_count(dev);
4a35ecf8 586 int mc_cnt = netdev_mc_count(dev);
b8ff05a9 587 const struct port_info *pi = netdev_priv(dev);
060e0c75 588 unsigned int mb = pi->adapter->fn;
b8ff05a9
DM
589
590 /* first do the secondary unicast addresses */
591 netdev_for_each_uc_addr(ha, dev) {
592 addr[naddr++] = ha->addr;
593 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
060e0c75 594 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
b8ff05a9
DM
595 naddr, addr, filt_idx, &uhash, sleep);
596 if (ret < 0)
597 return ret;
598
599 free = false;
600 naddr = 0;
601 }
602 }
603
604 /* next set up the multicast addresses */
4a35ecf8
DM
605 netdev_for_each_mc_addr(ha, dev) {
606 addr[naddr++] = ha->addr;
607 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
060e0c75 608 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
b8ff05a9
DM
609 naddr, addr, filt_idx, &mhash, sleep);
610 if (ret < 0)
611 return ret;
612
613 free = false;
614 naddr = 0;
615 }
616 }
617
060e0c75 618 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
b8ff05a9
DM
619 uhash | mhash, sleep);
620}
621
3069ee9b
VP
622int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
623module_param(dbfifo_int_thresh, int, 0644);
624MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
625
404d9e3f
VP
626/*
627 * usecs to sleep while draining the dbfifo
628 */
629static int dbfifo_drain_delay = 1000;
3069ee9b
VP
630module_param(dbfifo_drain_delay, int, 0644);
631MODULE_PARM_DESC(dbfifo_drain_delay,
632 "usecs to sleep while draining the dbfifo");
633
b8ff05a9
DM
634/*
635 * Set Rx properties of a port, such as promiscruity, address filters, and MTU.
636 * If @mtu is -1 it is left unchanged.
637 */
638static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
639{
640 int ret;
641 struct port_info *pi = netdev_priv(dev);
642
643 ret = set_addr_filters(dev, sleep_ok);
644 if (ret == 0)
060e0c75 645 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
b8ff05a9 646 (dev->flags & IFF_PROMISC) ? 1 : 0,
f8f5aafa 647 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
b8ff05a9
DM
648 sleep_ok);
649 return ret;
650}
651
652/**
653 * link_start - enable a port
654 * @dev: the port to enable
655 *
656 * Performs the MAC and PHY actions needed to enable a port.
657 */
658static int link_start(struct net_device *dev)
659{
660 int ret;
661 struct port_info *pi = netdev_priv(dev);
060e0c75 662 unsigned int mb = pi->adapter->fn;
b8ff05a9
DM
663
664 /*
665 * We do not set address filters and promiscuity here, the stack does
666 * that step explicitly.
667 */
060e0c75 668 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
f646968f 669 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
b8ff05a9 670 if (ret == 0) {
060e0c75 671 ret = t4_change_mac(pi->adapter, mb, pi->viid,
b8ff05a9 672 pi->xact_addr_filt, dev->dev_addr, true,
b6bd29e7 673 true);
b8ff05a9
DM
674 if (ret >= 0) {
675 pi->xact_addr_filt = ret;
676 ret = 0;
677 }
678 }
679 if (ret == 0)
060e0c75
DM
680 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
681 &pi->link_cfg);
30f00847
AB
682 if (ret == 0) {
683 local_bh_disable();
688848b1
AB
684 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
685 true, CXGB4_DCB_ENABLED);
30f00847
AB
686 local_bh_enable();
687 }
688848b1 688
b8ff05a9
DM
689 return ret;
690}
691
688848b1
AB
692int cxgb4_dcb_enabled(const struct net_device *dev)
693{
694#ifdef CONFIG_CHELSIO_T4_DCB
695 struct port_info *pi = netdev_priv(dev);
696
697 return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
698#else
699 return 0;
700#endif
701}
702EXPORT_SYMBOL(cxgb4_dcb_enabled);
703
704#ifdef CONFIG_CHELSIO_T4_DCB
705/* Handle a Data Center Bridging update message from the firmware. */
706static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
707{
708 int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
709 struct net_device *dev = adap->port[port];
710 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
711 int new_dcb_enabled;
712
713 cxgb4_dcb_handle_fw_update(adap, pcmd);
714 new_dcb_enabled = cxgb4_dcb_enabled(dev);
715
716 /* If the DCB has become enabled or disabled on the port then we're
717 * going to need to set up/tear down DCB Priority parameters for the
718 * TX Queues associated with the port.
719 */
720 if (new_dcb_enabled != old_dcb_enabled)
721 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
722}
723#endif /* CONFIG_CHELSIO_T4_DCB */
724
f2b7e78d
VP
725/* Clear a filter and release any of its resources that we own. This also
726 * clears the filter's "pending" status.
727 */
728static void clear_filter(struct adapter *adap, struct filter_entry *f)
729{
730 /* If the new or old filter have loopback rewriteing rules then we'll
731 * need to free any existing Layer Two Table (L2T) entries of the old
732 * filter rule. The firmware will handle freeing up any Source MAC
733 * Table (SMT) entries used for rewriting Source MAC Addresses in
734 * loopback rules.
735 */
736 if (f->l2t)
737 cxgb4_l2t_release(f->l2t);
738
739 /* The zeroing of the filter rule below clears the filter valid,
740 * pending, locked flags, l2t pointer, etc. so it's all we need for
741 * this operation.
742 */
743 memset(f, 0, sizeof(*f));
744}
745
746/* Handle a filter write/deletion reply.
747 */
748static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
749{
750 unsigned int idx = GET_TID(rpl);
751 unsigned int nidx = idx - adap->tids.ftid_base;
752 unsigned int ret;
753 struct filter_entry *f;
754
755 if (idx >= adap->tids.ftid_base && nidx <
756 (adap->tids.nftids + adap->tids.nsftids)) {
757 idx = nidx;
758 ret = GET_TCB_COOKIE(rpl->cookie);
759 f = &adap->tids.ftid_tab[idx];
760
761 if (ret == FW_FILTER_WR_FLT_DELETED) {
762 /* Clear the filter when we get confirmation from the
763 * hardware that the filter has been deleted.
764 */
765 clear_filter(adap, f);
766 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
767 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
768 idx);
769 clear_filter(adap, f);
770 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
771 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
772 f->pending = 0; /* asynchronous setup completed */
773 f->valid = 1;
774 } else {
775 /* Something went wrong. Issue a warning about the
776 * problem and clear everything out.
777 */
778 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
779 idx, ret);
780 clear_filter(adap, f);
781 }
782 }
783}
784
785/* Response queue handler for the FW event queue.
b8ff05a9
DM
786 */
787static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
788 const struct pkt_gl *gl)
789{
790 u8 opcode = ((const struct rss_header *)rsp)->opcode;
791
792 rsp++; /* skip RSS header */
b407a4a9
VP
793
794 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
795 */
796 if (unlikely(opcode == CPL_FW4_MSG &&
797 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
798 rsp++;
799 opcode = ((const struct rss_header *)rsp)->opcode;
800 rsp++;
801 if (opcode != CPL_SGE_EGR_UPDATE) {
802 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
803 , opcode);
804 goto out;
805 }
806 }
807
b8ff05a9
DM
808 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
809 const struct cpl_sge_egr_update *p = (void *)rsp;
810 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
e46dab4d 811 struct sge_txq *txq;
b8ff05a9 812
e46dab4d 813 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
b8ff05a9 814 txq->restarts++;
e46dab4d 815 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
b8ff05a9
DM
816 struct sge_eth_txq *eq;
817
818 eq = container_of(txq, struct sge_eth_txq, q);
819 netif_tx_wake_queue(eq->txq);
820 } else {
821 struct sge_ofld_txq *oq;
822
823 oq = container_of(txq, struct sge_ofld_txq, q);
824 tasklet_schedule(&oq->qresume_tsk);
825 }
826 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
827 const struct cpl_fw6_msg *p = (void *)rsp;
828
688848b1
AB
829#ifdef CONFIG_CHELSIO_T4_DCB
830 const struct fw_port_cmd *pcmd = (const void *)p->data;
831 unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
832 unsigned int action =
833 FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
834
835 if (cmd == FW_PORT_CMD &&
836 action == FW_PORT_ACTION_GET_PORT_INFO) {
837 int port = FW_PORT_CMD_PORTID_GET(
838 be32_to_cpu(pcmd->op_to_portid));
839 struct net_device *dev = q->adap->port[port];
840 int state_input = ((pcmd->u.info.dcbxdis_pkd &
841 FW_PORT_CMD_DCBXDIS)
842 ? CXGB4_DCB_INPUT_FW_DISABLED
843 : CXGB4_DCB_INPUT_FW_ENABLED);
844
845 cxgb4_dcb_state_fsm(dev, state_input);
846 }
847
848 if (cmd == FW_PORT_CMD &&
849 action == FW_PORT_ACTION_L2_DCB_CFG)
850 dcb_rpl(q->adap, pcmd);
851 else
852#endif
853 if (p->type == 0)
854 t4_handle_fw_rpl(q->adap, p->data);
b8ff05a9
DM
855 } else if (opcode == CPL_L2T_WRITE_RPL) {
856 const struct cpl_l2t_write_rpl *p = (void *)rsp;
857
858 do_l2t_write_rpl(q->adap, p);
f2b7e78d
VP
859 } else if (opcode == CPL_SET_TCB_RPL) {
860 const struct cpl_set_tcb_rpl *p = (void *)rsp;
861
862 filter_rpl(q->adap, p);
b8ff05a9
DM
863 } else
864 dev_err(q->adap->pdev_dev,
865 "unexpected CPL %#x on FW event queue\n", opcode);
b407a4a9 866out:
b8ff05a9
DM
867 return 0;
868}
869
870/**
871 * uldrx_handler - response queue handler for ULD queues
872 * @q: the response queue that received the packet
873 * @rsp: the response queue descriptor holding the offload message
874 * @gl: the gather list of packet fragments
875 *
876 * Deliver an ingress offload packet to a ULD. All processing is done by
877 * the ULD, we just maintain statistics.
878 */
879static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
880 const struct pkt_gl *gl)
881{
882 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
883
b407a4a9
VP
884 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
885 */
886 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
887 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
888 rsp += 2;
889
b8ff05a9
DM
890 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
891 rxq->stats.nomem++;
892 return -1;
893 }
894 if (gl == NULL)
895 rxq->stats.imm++;
896 else if (gl == CXGB4_MSG_AN)
897 rxq->stats.an++;
898 else
899 rxq->stats.pkts++;
900 return 0;
901}
902
903static void disable_msi(struct adapter *adapter)
904{
905 if (adapter->flags & USING_MSIX) {
906 pci_disable_msix(adapter->pdev);
907 adapter->flags &= ~USING_MSIX;
908 } else if (adapter->flags & USING_MSI) {
909 pci_disable_msi(adapter->pdev);
910 adapter->flags &= ~USING_MSI;
911 }
912}
913
914/*
915 * Interrupt handler for non-data events used with MSI-X.
916 */
917static irqreturn_t t4_nondata_intr(int irq, void *cookie)
918{
919 struct adapter *adap = cookie;
920
921 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
922 if (v & PFSW) {
923 adap->swintr = 1;
924 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
925 }
926 t4_slow_intr_handler(adap);
927 return IRQ_HANDLED;
928}
929
930/*
931 * Name the MSI-X interrupts.
932 */
933static void name_msix_vecs(struct adapter *adap)
934{
ba27816c 935 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
b8ff05a9
DM
936
937 /* non-data interrupts */
b1a3c2b6 938 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
b8ff05a9
DM
939
940 /* FW events */
b1a3c2b6
DM
941 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
942 adap->port[0]->name);
b8ff05a9
DM
943
944 /* Ethernet queues */
945 for_each_port(adap, j) {
946 struct net_device *d = adap->port[j];
947 const struct port_info *pi = netdev_priv(d);
948
ba27816c 949 for (i = 0; i < pi->nqsets; i++, msi_idx++)
b8ff05a9
DM
950 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
951 d->name, i);
b8ff05a9
DM
952 }
953
954 /* offload queues */
ba27816c
DM
955 for_each_ofldrxq(&adap->sge, i)
956 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
b1a3c2b6 957 adap->port[0]->name, i);
ba27816c
DM
958
959 for_each_rdmarxq(&adap->sge, i)
960 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
b1a3c2b6 961 adap->port[0]->name, i);
cf38be6d
HS
962
963 for_each_rdmaciq(&adap->sge, i)
964 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
965 adap->port[0]->name, i);
b8ff05a9
DM
966}
967
968static int request_msix_queue_irqs(struct adapter *adap)
969{
970 struct sge *s = &adap->sge;
cf38be6d
HS
971 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
972 int msi_index = 2;
b8ff05a9
DM
973
974 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
975 adap->msix_info[1].desc, &s->fw_evtq);
976 if (err)
977 return err;
978
979 for_each_ethrxq(s, ethqidx) {
404d9e3f
VP
980 err = request_irq(adap->msix_info[msi_index].vec,
981 t4_sge_intr_msix, 0,
982 adap->msix_info[msi_index].desc,
b8ff05a9
DM
983 &s->ethrxq[ethqidx].rspq);
984 if (err)
985 goto unwind;
404d9e3f 986 msi_index++;
b8ff05a9
DM
987 }
988 for_each_ofldrxq(s, ofldqidx) {
404d9e3f
VP
989 err = request_irq(adap->msix_info[msi_index].vec,
990 t4_sge_intr_msix, 0,
991 adap->msix_info[msi_index].desc,
b8ff05a9
DM
992 &s->ofldrxq[ofldqidx].rspq);
993 if (err)
994 goto unwind;
404d9e3f 995 msi_index++;
b8ff05a9
DM
996 }
997 for_each_rdmarxq(s, rdmaqidx) {
404d9e3f
VP
998 err = request_irq(adap->msix_info[msi_index].vec,
999 t4_sge_intr_msix, 0,
1000 adap->msix_info[msi_index].desc,
b8ff05a9
DM
1001 &s->rdmarxq[rdmaqidx].rspq);
1002 if (err)
1003 goto unwind;
404d9e3f 1004 msi_index++;
b8ff05a9 1005 }
cf38be6d
HS
1006 for_each_rdmaciq(s, rdmaciqqidx) {
1007 err = request_irq(adap->msix_info[msi_index].vec,
1008 t4_sge_intr_msix, 0,
1009 adap->msix_info[msi_index].desc,
1010 &s->rdmaciq[rdmaciqqidx].rspq);
1011 if (err)
1012 goto unwind;
1013 msi_index++;
1014 }
b8ff05a9
DM
1015 return 0;
1016
1017unwind:
cf38be6d
HS
1018 while (--rdmaciqqidx >= 0)
1019 free_irq(adap->msix_info[--msi_index].vec,
1020 &s->rdmaciq[rdmaciqqidx].rspq);
b8ff05a9 1021 while (--rdmaqidx >= 0)
404d9e3f 1022 free_irq(adap->msix_info[--msi_index].vec,
b8ff05a9
DM
1023 &s->rdmarxq[rdmaqidx].rspq);
1024 while (--ofldqidx >= 0)
404d9e3f 1025 free_irq(adap->msix_info[--msi_index].vec,
b8ff05a9
DM
1026 &s->ofldrxq[ofldqidx].rspq);
1027 while (--ethqidx >= 0)
404d9e3f
VP
1028 free_irq(adap->msix_info[--msi_index].vec,
1029 &s->ethrxq[ethqidx].rspq);
b8ff05a9
DM
1030 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1031 return err;
1032}
1033
1034static void free_msix_queue_irqs(struct adapter *adap)
1035{
404d9e3f 1036 int i, msi_index = 2;
b8ff05a9
DM
1037 struct sge *s = &adap->sge;
1038
1039 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1040 for_each_ethrxq(s, i)
404d9e3f 1041 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
b8ff05a9 1042 for_each_ofldrxq(s, i)
404d9e3f 1043 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
b8ff05a9 1044 for_each_rdmarxq(s, i)
404d9e3f 1045 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
cf38be6d
HS
1046 for_each_rdmaciq(s, i)
1047 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
b8ff05a9
DM
1048}
1049
671b0060
DM
1050/**
1051 * write_rss - write the RSS table for a given port
1052 * @pi: the port
1053 * @queues: array of queue indices for RSS
1054 *
1055 * Sets up the portion of the HW RSS table for the port's VI to distribute
1056 * packets to the Rx queues in @queues.
1057 */
1058static int write_rss(const struct port_info *pi, const u16 *queues)
1059{
1060 u16 *rss;
1061 int i, err;
1062 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
1063
1064 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
1065 if (!rss)
1066 return -ENOMEM;
1067
1068 /* map the queue indices to queue ids */
1069 for (i = 0; i < pi->rss_size; i++, queues++)
1070 rss[i] = q[*queues].rspq.abs_id;
1071
060e0c75
DM
1072 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
1073 pi->rss_size, rss, pi->rss_size);
671b0060
DM
1074 kfree(rss);
1075 return err;
1076}
1077
b8ff05a9
DM
1078/**
1079 * setup_rss - configure RSS
1080 * @adap: the adapter
1081 *
671b0060 1082 * Sets up RSS for each port.
b8ff05a9
DM
1083 */
1084static int setup_rss(struct adapter *adap)
1085{
671b0060 1086 int i, err;
b8ff05a9
DM
1087
1088 for_each_port(adap, i) {
1089 const struct port_info *pi = adap2pinfo(adap, i);
b8ff05a9 1090
671b0060 1091 err = write_rss(pi, pi->rss);
b8ff05a9
DM
1092 if (err)
1093 return err;
1094 }
1095 return 0;
1096}
1097
e46dab4d
DM
1098/*
1099 * Return the channel of the ingress queue with the given qid.
1100 */
1101static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1102{
1103 qid -= p->ingr_start;
1104 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1105}
1106
b8ff05a9
DM
1107/*
1108 * Wait until all NAPI handlers are descheduled.
1109 */
1110static void quiesce_rx(struct adapter *adap)
1111{
1112 int i;
1113
1114 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1115 struct sge_rspq *q = adap->sge.ingr_map[i];
1116
1117 if (q && q->handler)
1118 napi_disable(&q->napi);
1119 }
1120}
1121
1122/*
1123 * Enable NAPI scheduling and interrupt generation for all Rx queues.
1124 */
1125static void enable_rx(struct adapter *adap)
1126{
1127 int i;
1128
1129 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1130 struct sge_rspq *q = adap->sge.ingr_map[i];
1131
1132 if (!q)
1133 continue;
1134 if (q->handler)
1135 napi_enable(&q->napi);
1136 /* 0-increment GTS to start the timer and enable interrupts */
1137 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1138 SEINTARM(q->intr_params) |
1139 INGRESSQID(q->cntxt_id));
1140 }
1141}
1142
1143/**
1144 * setup_sge_queues - configure SGE Tx/Rx/response queues
1145 * @adap: the adapter
1146 *
1147 * Determines how many sets of SGE queues to use and initializes them.
1148 * We support multiple queue sets per port if we have MSI-X, otherwise
1149 * just one queue set per port.
1150 */
1151static int setup_sge_queues(struct adapter *adap)
1152{
1153 int err, msi_idx, i, j;
1154 struct sge *s = &adap->sge;
1155
1156 bitmap_zero(s->starving_fl, MAX_EGRQ);
1157 bitmap_zero(s->txq_maperr, MAX_EGRQ);
1158
1159 if (adap->flags & USING_MSIX)
1160 msi_idx = 1; /* vector 0 is for non-queue interrupts */
1161 else {
1162 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1163 NULL, NULL);
1164 if (err)
1165 return err;
1166 msi_idx = -((int)s->intrq.abs_id + 1);
1167 }
1168
1169 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1170 msi_idx, NULL, fwevtq_handler);
1171 if (err) {
1172freeout: t4_free_sge_resources(adap);
1173 return err;
1174 }
1175
1176 for_each_port(adap, i) {
1177 struct net_device *dev = adap->port[i];
1178 struct port_info *pi = netdev_priv(dev);
1179 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1180 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1181
1182 for (j = 0; j < pi->nqsets; j++, q++) {
1183 if (msi_idx > 0)
1184 msi_idx++;
1185 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1186 msi_idx, &q->fl,
1187 t4_ethrx_handler);
1188 if (err)
1189 goto freeout;
1190 q->rspq.idx = j;
1191 memset(&q->stats, 0, sizeof(q->stats));
1192 }
1193 for (j = 0; j < pi->nqsets; j++, t++) {
1194 err = t4_sge_alloc_eth_txq(adap, t, dev,
1195 netdev_get_tx_queue(dev, j),
1196 s->fw_evtq.cntxt_id);
1197 if (err)
1198 goto freeout;
1199 }
1200 }
1201
1202 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1203 for_each_ofldrxq(s, i) {
1204 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1205 struct net_device *dev = adap->port[i / j];
1206
1207 if (msi_idx > 0)
1208 msi_idx++;
1209 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
cf38be6d
HS
1210 q->fl.size ? &q->fl : NULL,
1211 uldrx_handler);
b8ff05a9
DM
1212 if (err)
1213 goto freeout;
1214 memset(&q->stats, 0, sizeof(q->stats));
1215 s->ofld_rxq[i] = q->rspq.abs_id;
1216 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1217 s->fw_evtq.cntxt_id);
1218 if (err)
1219 goto freeout;
1220 }
1221
1222 for_each_rdmarxq(s, i) {
1223 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1224
1225 if (msi_idx > 0)
1226 msi_idx++;
1227 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
cf38be6d
HS
1228 msi_idx, q->fl.size ? &q->fl : NULL,
1229 uldrx_handler);
b8ff05a9
DM
1230 if (err)
1231 goto freeout;
1232 memset(&q->stats, 0, sizeof(q->stats));
1233 s->rdma_rxq[i] = q->rspq.abs_id;
1234 }
1235
cf38be6d
HS
1236 for_each_rdmaciq(s, i) {
1237 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1238
1239 if (msi_idx > 0)
1240 msi_idx++;
1241 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1242 msi_idx, q->fl.size ? &q->fl : NULL,
1243 uldrx_handler);
1244 if (err)
1245 goto freeout;
1246 memset(&q->stats, 0, sizeof(q->stats));
1247 s->rdma_ciq[i] = q->rspq.abs_id;
1248 }
1249
b8ff05a9
DM
1250 for_each_port(adap, i) {
1251 /*
1252 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1253 * have RDMA queues, and that's the right value.
1254 */
1255 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1256 s->fw_evtq.cntxt_id,
1257 s->rdmarxq[i].rspq.cntxt_id);
1258 if (err)
1259 goto freeout;
1260 }
1261
9bb59b96
HS
1262 t4_write_reg(adap, is_t4(adap->params.chip) ?
1263 MPS_TRC_RSS_CONTROL :
1264 MPS_T5_TRC_RSS_CONTROL,
b8ff05a9
DM
1265 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1266 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1267 return 0;
1268}
1269
b8ff05a9
DM
1270/*
1271 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1272 * The allocated memory is cleared.
1273 */
1274void *t4_alloc_mem(size_t size)
1275{
8be04b93 1276 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
b8ff05a9
DM
1277
1278 if (!p)
89bf67f1 1279 p = vzalloc(size);
b8ff05a9
DM
1280 return p;
1281}
1282
1283/*
1284 * Free memory allocated through alloc_mem().
1285 */
31b9c19b 1286static void t4_free_mem(void *addr)
b8ff05a9
DM
1287{
1288 if (is_vmalloc_addr(addr))
1289 vfree(addr);
1290 else
1291 kfree(addr);
1292}
1293
f2b7e78d
VP
1294/* Send a Work Request to write the filter at a specified index. We construct
1295 * a Firmware Filter Work Request to have the work done and put the indicated
1296 * filter into "pending" mode which will prevent any further actions against
1297 * it till we get a reply from the firmware on the completion status of the
1298 * request.
1299 */
1300static int set_filter_wr(struct adapter *adapter, int fidx)
1301{
1302 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1303 struct sk_buff *skb;
1304 struct fw_filter_wr *fwr;
1305 unsigned int ftid;
1306
1307 /* If the new filter requires loopback Destination MAC and/or VLAN
1308 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1309 * the filter.
1310 */
1311 if (f->fs.newdmac || f->fs.newvlan) {
1312 /* allocate L2T entry for new filter */
1313 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1314 if (f->l2t == NULL)
1315 return -EAGAIN;
1316 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1317 f->fs.eport, f->fs.dmac)) {
1318 cxgb4_l2t_release(f->l2t);
1319 f->l2t = NULL;
1320 return -ENOMEM;
1321 }
1322 }
1323
1324 ftid = adapter->tids.ftid_base + fidx;
1325
1326 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1327 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1328 memset(fwr, 0, sizeof(*fwr));
1329
1330 /* It would be nice to put most of the following in t4_hw.c but most
1331 * of the work is translating the cxgbtool ch_filter_specification
1332 * into the Work Request and the definition of that structure is
1333 * currently in cxgbtool.h which isn't appropriate to pull into the
1334 * common code. We may eventually try to come up with a more neutral
1335 * filter specification structure but for now it's easiest to simply
1336 * put this fairly direct code in line ...
1337 */
1338 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1339 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1340 fwr->tid_to_iq =
1341 htonl(V_FW_FILTER_WR_TID(ftid) |
1342 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1343 V_FW_FILTER_WR_NOREPLY(0) |
1344 V_FW_FILTER_WR_IQ(f->fs.iq));
1345 fwr->del_filter_to_l2tix =
1346 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1347 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1348 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1349 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1350 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1351 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1352 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1353 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1354 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1355 f->fs.newvlan == VLAN_REWRITE) |
1356 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1357 f->fs.newvlan == VLAN_REWRITE) |
1358 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1359 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1360 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1361 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1362 fwr->ethtype = htons(f->fs.val.ethtype);
1363 fwr->ethtypem = htons(f->fs.mask.ethtype);
1364 fwr->frag_to_ovlan_vldm =
1365 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1366 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1367 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1368 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1369 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1370 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1371 fwr->smac_sel = 0;
1372 fwr->rx_chan_rx_rpl_iq =
1373 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1374 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1375 fwr->maci_to_matchtypem =
1376 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1377 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1378 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1379 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1380 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1381 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1382 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1383 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1384 fwr->ptcl = f->fs.val.proto;
1385 fwr->ptclm = f->fs.mask.proto;
1386 fwr->ttyp = f->fs.val.tos;
1387 fwr->ttypm = f->fs.mask.tos;
1388 fwr->ivlan = htons(f->fs.val.ivlan);
1389 fwr->ivlanm = htons(f->fs.mask.ivlan);
1390 fwr->ovlan = htons(f->fs.val.ovlan);
1391 fwr->ovlanm = htons(f->fs.mask.ovlan);
1392 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1393 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1394 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1395 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1396 fwr->lp = htons(f->fs.val.lport);
1397 fwr->lpm = htons(f->fs.mask.lport);
1398 fwr->fp = htons(f->fs.val.fport);
1399 fwr->fpm = htons(f->fs.mask.fport);
1400 if (f->fs.newsmac)
1401 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1402
1403 /* Mark the filter as "pending" and ship off the Filter Work Request.
1404 * When we get the Work Request Reply we'll clear the pending status.
1405 */
1406 f->pending = 1;
1407 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1408 t4_ofld_send(adapter, skb);
1409 return 0;
1410}
1411
1412/* Delete the filter at a specified index.
1413 */
1414static int del_filter_wr(struct adapter *adapter, int fidx)
1415{
1416 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1417 struct sk_buff *skb;
1418 struct fw_filter_wr *fwr;
1419 unsigned int len, ftid;
1420
1421 len = sizeof(*fwr);
1422 ftid = adapter->tids.ftid_base + fidx;
1423
1424 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1425 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1426 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1427
1428 /* Mark the filter as "pending" and ship off the Filter Work Request.
1429 * When we get the Work Request Reply we'll clear the pending status.
1430 */
1431 f->pending = 1;
1432 t4_mgmt_tx(adapter, skb);
1433 return 0;
1434}
1435
688848b1
AB
1436static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1437 void *accel_priv, select_queue_fallback_t fallback)
1438{
1439 int txq;
1440
1441#ifdef CONFIG_CHELSIO_T4_DCB
1442 /* If a Data Center Bridging has been successfully negotiated on this
1443 * link then we'll use the skb's priority to map it to a TX Queue.
1444 * The skb's priority is determined via the VLAN Tag Priority Code
1445 * Point field.
1446 */
1447 if (cxgb4_dcb_enabled(dev)) {
1448 u16 vlan_tci;
1449 int err;
1450
1451 err = vlan_get_tag(skb, &vlan_tci);
1452 if (unlikely(err)) {
1453 if (net_ratelimit())
1454 netdev_warn(dev,
1455 "TX Packet without VLAN Tag on DCB Link\n");
1456 txq = 0;
1457 } else {
1458 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1459 }
1460 return txq;
1461 }
1462#endif /* CONFIG_CHELSIO_T4_DCB */
1463
1464 if (select_queue) {
1465 txq = (skb_rx_queue_recorded(skb)
1466 ? skb_get_rx_queue(skb)
1467 : smp_processor_id());
1468
1469 while (unlikely(txq >= dev->real_num_tx_queues))
1470 txq -= dev->real_num_tx_queues;
1471
1472 return txq;
1473 }
1474
1475 return fallback(dev, skb) % dev->real_num_tx_queues;
1476}
1477
b8ff05a9
DM
1478static inline int is_offload(const struct adapter *adap)
1479{
1480 return adap->params.offload;
1481}
1482
1483/*
1484 * Implementation of ethtool operations.
1485 */
1486
1487static u32 get_msglevel(struct net_device *dev)
1488{
1489 return netdev2adap(dev)->msg_enable;
1490}
1491
1492static void set_msglevel(struct net_device *dev, u32 val)
1493{
1494 netdev2adap(dev)->msg_enable = val;
1495}
1496
1497static char stats_strings[][ETH_GSTRING_LEN] = {
1498 "TxOctetsOK ",
1499 "TxFramesOK ",
1500 "TxBroadcastFrames ",
1501 "TxMulticastFrames ",
1502 "TxUnicastFrames ",
1503 "TxErrorFrames ",
1504
1505 "TxFrames64 ",
1506 "TxFrames65To127 ",
1507 "TxFrames128To255 ",
1508 "TxFrames256To511 ",
1509 "TxFrames512To1023 ",
1510 "TxFrames1024To1518 ",
1511 "TxFrames1519ToMax ",
1512
1513 "TxFramesDropped ",
1514 "TxPauseFrames ",
1515 "TxPPP0Frames ",
1516 "TxPPP1Frames ",
1517 "TxPPP2Frames ",
1518 "TxPPP3Frames ",
1519 "TxPPP4Frames ",
1520 "TxPPP5Frames ",
1521 "TxPPP6Frames ",
1522 "TxPPP7Frames ",
1523
1524 "RxOctetsOK ",
1525 "RxFramesOK ",
1526 "RxBroadcastFrames ",
1527 "RxMulticastFrames ",
1528 "RxUnicastFrames ",
1529
1530 "RxFramesTooLong ",
1531 "RxJabberErrors ",
1532 "RxFCSErrors ",
1533 "RxLengthErrors ",
1534 "RxSymbolErrors ",
1535 "RxRuntFrames ",
1536
1537 "RxFrames64 ",
1538 "RxFrames65To127 ",
1539 "RxFrames128To255 ",
1540 "RxFrames256To511 ",
1541 "RxFrames512To1023 ",
1542 "RxFrames1024To1518 ",
1543 "RxFrames1519ToMax ",
1544
1545 "RxPauseFrames ",
1546 "RxPPP0Frames ",
1547 "RxPPP1Frames ",
1548 "RxPPP2Frames ",
1549 "RxPPP3Frames ",
1550 "RxPPP4Frames ",
1551 "RxPPP5Frames ",
1552 "RxPPP6Frames ",
1553 "RxPPP7Frames ",
1554
1555 "RxBG0FramesDropped ",
1556 "RxBG1FramesDropped ",
1557 "RxBG2FramesDropped ",
1558 "RxBG3FramesDropped ",
1559 "RxBG0FramesTrunc ",
1560 "RxBG1FramesTrunc ",
1561 "RxBG2FramesTrunc ",
1562 "RxBG3FramesTrunc ",
1563
1564 "TSO ",
1565 "TxCsumOffload ",
1566 "RxCsumGood ",
1567 "VLANextractions ",
1568 "VLANinsertions ",
4a6346d4
DM
1569 "GROpackets ",
1570 "GROmerged ",
22adfe0a
SR
1571 "WriteCoalSuccess ",
1572 "WriteCoalFail ",
b8ff05a9
DM
1573};
1574
1575static int get_sset_count(struct net_device *dev, int sset)
1576{
1577 switch (sset) {
1578 case ETH_SS_STATS:
1579 return ARRAY_SIZE(stats_strings);
1580 default:
1581 return -EOPNOTSUPP;
1582 }
1583}
1584
1585#define T4_REGMAP_SIZE (160 * 1024)
251f9e88 1586#define T5_REGMAP_SIZE (332 * 1024)
b8ff05a9
DM
1587
1588static int get_regs_len(struct net_device *dev)
1589{
251f9e88 1590 struct adapter *adap = netdev2adap(dev);
d14807dd 1591 if (is_t4(adap->params.chip))
251f9e88
SR
1592 return T4_REGMAP_SIZE;
1593 else
1594 return T5_REGMAP_SIZE;
b8ff05a9
DM
1595}
1596
1597static int get_eeprom_len(struct net_device *dev)
1598{
1599 return EEPROMSIZE;
1600}
1601
1602static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1603{
1604 struct adapter *adapter = netdev2adap(dev);
1605
23020ab3
RJ
1606 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1607 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1608 strlcpy(info->bus_info, pci_name(adapter->pdev),
1609 sizeof(info->bus_info));
b8ff05a9 1610
84b40501 1611 if (adapter->params.fw_vers)
b8ff05a9
DM
1612 snprintf(info->fw_version, sizeof(info->fw_version),
1613 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1614 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1615 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1616 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1617 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1618 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1619 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1620 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1621 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1622}
1623
1624static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1625{
1626 if (stringset == ETH_SS_STATS)
1627 memcpy(data, stats_strings, sizeof(stats_strings));
1628}
1629
1630/*
1631 * port stats maintained per queue of the port. They should be in the same
1632 * order as in stats_strings above.
1633 */
1634struct queue_port_stats {
1635 u64 tso;
1636 u64 tx_csum;
1637 u64 rx_csum;
1638 u64 vlan_ex;
1639 u64 vlan_ins;
4a6346d4
DM
1640 u64 gro_pkts;
1641 u64 gro_merged;
b8ff05a9
DM
1642};
1643
1644static void collect_sge_port_stats(const struct adapter *adap,
1645 const struct port_info *p, struct queue_port_stats *s)
1646{
1647 int i;
1648 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1649 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1650
1651 memset(s, 0, sizeof(*s));
1652 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1653 s->tso += tx->tso;
1654 s->tx_csum += tx->tx_cso;
1655 s->rx_csum += rx->stats.rx_cso;
1656 s->vlan_ex += rx->stats.vlan_ex;
1657 s->vlan_ins += tx->vlan_ins;
4a6346d4
DM
1658 s->gro_pkts += rx->stats.lro_pkts;
1659 s->gro_merged += rx->stats.lro_merged;
b8ff05a9
DM
1660 }
1661}
1662
1663static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1664 u64 *data)
1665{
1666 struct port_info *pi = netdev_priv(dev);
1667 struct adapter *adapter = pi->adapter;
22adfe0a 1668 u32 val1, val2;
b8ff05a9
DM
1669
1670 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1671
1672 data += sizeof(struct port_stats) / sizeof(u64);
1673 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
22adfe0a 1674 data += sizeof(struct queue_port_stats) / sizeof(u64);
d14807dd 1675 if (!is_t4(adapter->params.chip)) {
22adfe0a
SR
1676 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1677 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1678 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1679 *data = val1 - val2;
1680 data++;
1681 *data = val2;
1682 data++;
1683 } else {
1684 memset(data, 0, 2 * sizeof(u64));
1685 *data += 2;
1686 }
b8ff05a9
DM
1687}
1688
1689/*
1690 * Return a version number to identify the type of adapter. The scheme is:
1691 * - bits 0..9: chip version
1692 * - bits 10..15: chip revision
835bb606 1693 * - bits 16..23: register dump version
b8ff05a9
DM
1694 */
1695static inline unsigned int mk_adap_vers(const struct adapter *ap)
1696{
d14807dd
HS
1697 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1698 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
b8ff05a9
DM
1699}
1700
1701static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1702 unsigned int end)
1703{
1704 u32 *p = buf + start;
1705
1706 for ( ; start <= end; start += sizeof(u32))
1707 *p++ = t4_read_reg(ap, start);
1708}
1709
1710static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1711 void *buf)
1712{
251f9e88 1713 static const unsigned int t4_reg_ranges[] = {
b8ff05a9
DM
1714 0x1008, 0x1108,
1715 0x1180, 0x11b4,
1716 0x11fc, 0x123c,
1717 0x1300, 0x173c,
1718 0x1800, 0x18fc,
1719 0x3000, 0x30d8,
1720 0x30e0, 0x5924,
1721 0x5960, 0x59d4,
1722 0x5a00, 0x5af8,
1723 0x6000, 0x6098,
1724 0x6100, 0x6150,
1725 0x6200, 0x6208,
1726 0x6240, 0x6248,
1727 0x6280, 0x6338,
1728 0x6370, 0x638c,
1729 0x6400, 0x643c,
1730 0x6500, 0x6524,
1731 0x6a00, 0x6a38,
1732 0x6a60, 0x6a78,
1733 0x6b00, 0x6b84,
1734 0x6bf0, 0x6c84,
1735 0x6cf0, 0x6d84,
1736 0x6df0, 0x6e84,
1737 0x6ef0, 0x6f84,
1738 0x6ff0, 0x7084,
1739 0x70f0, 0x7184,
1740 0x71f0, 0x7284,
1741 0x72f0, 0x7384,
1742 0x73f0, 0x7450,
1743 0x7500, 0x7530,
1744 0x7600, 0x761c,
1745 0x7680, 0x76cc,
1746 0x7700, 0x7798,
1747 0x77c0, 0x77fc,
1748 0x7900, 0x79fc,
1749 0x7b00, 0x7c38,
1750 0x7d00, 0x7efc,
1751 0x8dc0, 0x8e1c,
1752 0x8e30, 0x8e78,
1753 0x8ea0, 0x8f6c,
1754 0x8fc0, 0x9074,
1755 0x90fc, 0x90fc,
1756 0x9400, 0x9458,
1757 0x9600, 0x96bc,
1758 0x9800, 0x9808,
1759 0x9820, 0x983c,
1760 0x9850, 0x9864,
1761 0x9c00, 0x9c6c,
1762 0x9c80, 0x9cec,
1763 0x9d00, 0x9d6c,
1764 0x9d80, 0x9dec,
1765 0x9e00, 0x9e6c,
1766 0x9e80, 0x9eec,
1767 0x9f00, 0x9f6c,
1768 0x9f80, 0x9fec,
1769 0xd004, 0xd03c,
1770 0xdfc0, 0xdfe0,
1771 0xe000, 0xea7c,
3d9103f8
HS
1772 0xf000, 0x11110,
1773 0x11118, 0x11190,
835bb606
DM
1774 0x19040, 0x1906c,
1775 0x19078, 0x19080,
1776 0x1908c, 0x19124,
b8ff05a9
DM
1777 0x19150, 0x191b0,
1778 0x191d0, 0x191e8,
1779 0x19238, 0x1924c,
1780 0x193f8, 0x19474,
1781 0x19490, 0x194f8,
1782 0x19800, 0x19f30,
1783 0x1a000, 0x1a06c,
1784 0x1a0b0, 0x1a120,
1785 0x1a128, 0x1a138,
1786 0x1a190, 0x1a1c4,
1787 0x1a1fc, 0x1a1fc,
1788 0x1e040, 0x1e04c,
835bb606 1789 0x1e284, 0x1e28c,
b8ff05a9
DM
1790 0x1e2c0, 0x1e2c0,
1791 0x1e2e0, 0x1e2e0,
1792 0x1e300, 0x1e384,
1793 0x1e3c0, 0x1e3c8,
1794 0x1e440, 0x1e44c,
835bb606 1795 0x1e684, 0x1e68c,
b8ff05a9
DM
1796 0x1e6c0, 0x1e6c0,
1797 0x1e6e0, 0x1e6e0,
1798 0x1e700, 0x1e784,
1799 0x1e7c0, 0x1e7c8,
1800 0x1e840, 0x1e84c,
835bb606 1801 0x1ea84, 0x1ea8c,
b8ff05a9
DM
1802 0x1eac0, 0x1eac0,
1803 0x1eae0, 0x1eae0,
1804 0x1eb00, 0x1eb84,
1805 0x1ebc0, 0x1ebc8,
1806 0x1ec40, 0x1ec4c,
835bb606 1807 0x1ee84, 0x1ee8c,
b8ff05a9
DM
1808 0x1eec0, 0x1eec0,
1809 0x1eee0, 0x1eee0,
1810 0x1ef00, 0x1ef84,
1811 0x1efc0, 0x1efc8,
1812 0x1f040, 0x1f04c,
835bb606 1813 0x1f284, 0x1f28c,
b8ff05a9
DM
1814 0x1f2c0, 0x1f2c0,
1815 0x1f2e0, 0x1f2e0,
1816 0x1f300, 0x1f384,
1817 0x1f3c0, 0x1f3c8,
1818 0x1f440, 0x1f44c,
835bb606 1819 0x1f684, 0x1f68c,
b8ff05a9
DM
1820 0x1f6c0, 0x1f6c0,
1821 0x1f6e0, 0x1f6e0,
1822 0x1f700, 0x1f784,
1823 0x1f7c0, 0x1f7c8,
1824 0x1f840, 0x1f84c,
835bb606 1825 0x1fa84, 0x1fa8c,
b8ff05a9
DM
1826 0x1fac0, 0x1fac0,
1827 0x1fae0, 0x1fae0,
1828 0x1fb00, 0x1fb84,
1829 0x1fbc0, 0x1fbc8,
1830 0x1fc40, 0x1fc4c,
835bb606 1831 0x1fe84, 0x1fe8c,
b8ff05a9
DM
1832 0x1fec0, 0x1fec0,
1833 0x1fee0, 0x1fee0,
1834 0x1ff00, 0x1ff84,
1835 0x1ffc0, 0x1ffc8,
1836 0x20000, 0x2002c,
1837 0x20100, 0x2013c,
1838 0x20190, 0x201c8,
1839 0x20200, 0x20318,
1840 0x20400, 0x20528,
1841 0x20540, 0x20614,
1842 0x21000, 0x21040,
1843 0x2104c, 0x21060,
1844 0x210c0, 0x210ec,
1845 0x21200, 0x21268,
1846 0x21270, 0x21284,
1847 0x212fc, 0x21388,
1848 0x21400, 0x21404,
1849 0x21500, 0x21518,
1850 0x2152c, 0x2153c,
1851 0x21550, 0x21554,
1852 0x21600, 0x21600,
1853 0x21608, 0x21628,
1854 0x21630, 0x2163c,
1855 0x21700, 0x2171c,
1856 0x21780, 0x2178c,
1857 0x21800, 0x21c38,
1858 0x21c80, 0x21d7c,
1859 0x21e00, 0x21e04,
1860 0x22000, 0x2202c,
1861 0x22100, 0x2213c,
1862 0x22190, 0x221c8,
1863 0x22200, 0x22318,
1864 0x22400, 0x22528,
1865 0x22540, 0x22614,
1866 0x23000, 0x23040,
1867 0x2304c, 0x23060,
1868 0x230c0, 0x230ec,
1869 0x23200, 0x23268,
1870 0x23270, 0x23284,
1871 0x232fc, 0x23388,
1872 0x23400, 0x23404,
1873 0x23500, 0x23518,
1874 0x2352c, 0x2353c,
1875 0x23550, 0x23554,
1876 0x23600, 0x23600,
1877 0x23608, 0x23628,
1878 0x23630, 0x2363c,
1879 0x23700, 0x2371c,
1880 0x23780, 0x2378c,
1881 0x23800, 0x23c38,
1882 0x23c80, 0x23d7c,
1883 0x23e00, 0x23e04,
1884 0x24000, 0x2402c,
1885 0x24100, 0x2413c,
1886 0x24190, 0x241c8,
1887 0x24200, 0x24318,
1888 0x24400, 0x24528,
1889 0x24540, 0x24614,
1890 0x25000, 0x25040,
1891 0x2504c, 0x25060,
1892 0x250c0, 0x250ec,
1893 0x25200, 0x25268,
1894 0x25270, 0x25284,
1895 0x252fc, 0x25388,
1896 0x25400, 0x25404,
1897 0x25500, 0x25518,
1898 0x2552c, 0x2553c,
1899 0x25550, 0x25554,
1900 0x25600, 0x25600,
1901 0x25608, 0x25628,
1902 0x25630, 0x2563c,
1903 0x25700, 0x2571c,
1904 0x25780, 0x2578c,
1905 0x25800, 0x25c38,
1906 0x25c80, 0x25d7c,
1907 0x25e00, 0x25e04,
1908 0x26000, 0x2602c,
1909 0x26100, 0x2613c,
1910 0x26190, 0x261c8,
1911 0x26200, 0x26318,
1912 0x26400, 0x26528,
1913 0x26540, 0x26614,
1914 0x27000, 0x27040,
1915 0x2704c, 0x27060,
1916 0x270c0, 0x270ec,
1917 0x27200, 0x27268,
1918 0x27270, 0x27284,
1919 0x272fc, 0x27388,
1920 0x27400, 0x27404,
1921 0x27500, 0x27518,
1922 0x2752c, 0x2753c,
1923 0x27550, 0x27554,
1924 0x27600, 0x27600,
1925 0x27608, 0x27628,
1926 0x27630, 0x2763c,
1927 0x27700, 0x2771c,
1928 0x27780, 0x2778c,
1929 0x27800, 0x27c38,
1930 0x27c80, 0x27d7c,
1931 0x27e00, 0x27e04
1932 };
1933
251f9e88
SR
1934 static const unsigned int t5_reg_ranges[] = {
1935 0x1008, 0x1148,
1936 0x1180, 0x11b4,
1937 0x11fc, 0x123c,
1938 0x1280, 0x173c,
1939 0x1800, 0x18fc,
1940 0x3000, 0x3028,
1941 0x3060, 0x30d8,
1942 0x30e0, 0x30fc,
1943 0x3140, 0x357c,
1944 0x35a8, 0x35cc,
1945 0x35ec, 0x35ec,
1946 0x3600, 0x5624,
1947 0x56cc, 0x575c,
1948 0x580c, 0x5814,
1949 0x5890, 0x58bc,
1950 0x5940, 0x59dc,
1951 0x59fc, 0x5a18,
1952 0x5a60, 0x5a9c,
1953 0x5b9c, 0x5bfc,
1954 0x6000, 0x6040,
1955 0x6058, 0x614c,
1956 0x7700, 0x7798,
1957 0x77c0, 0x78fc,
1958 0x7b00, 0x7c54,
1959 0x7d00, 0x7efc,
1960 0x8dc0, 0x8de0,
1961 0x8df8, 0x8e84,
1962 0x8ea0, 0x8f84,
1963 0x8fc0, 0x90f8,
1964 0x9400, 0x9470,
1965 0x9600, 0x96f4,
1966 0x9800, 0x9808,
1967 0x9820, 0x983c,
1968 0x9850, 0x9864,
1969 0x9c00, 0x9c6c,
1970 0x9c80, 0x9cec,
1971 0x9d00, 0x9d6c,
1972 0x9d80, 0x9dec,
1973 0x9e00, 0x9e6c,
1974 0x9e80, 0x9eec,
1975 0x9f00, 0x9f6c,
1976 0x9f80, 0xa020,
1977 0xd004, 0xd03c,
1978 0xdfc0, 0xdfe0,
1979 0xe000, 0x11088,
3d9103f8
HS
1980 0x1109c, 0x11110,
1981 0x11118, 0x1117c,
251f9e88
SR
1982 0x11190, 0x11204,
1983 0x19040, 0x1906c,
1984 0x19078, 0x19080,
1985 0x1908c, 0x19124,
1986 0x19150, 0x191b0,
1987 0x191d0, 0x191e8,
1988 0x19238, 0x19290,
1989 0x193f8, 0x19474,
1990 0x19490, 0x194cc,
1991 0x194f0, 0x194f8,
1992 0x19c00, 0x19c60,
1993 0x19c94, 0x19e10,
1994 0x19e50, 0x19f34,
1995 0x19f40, 0x19f50,
1996 0x19f90, 0x19fe4,
1997 0x1a000, 0x1a06c,
1998 0x1a0b0, 0x1a120,
1999 0x1a128, 0x1a138,
2000 0x1a190, 0x1a1c4,
2001 0x1a1fc, 0x1a1fc,
2002 0x1e008, 0x1e00c,
2003 0x1e040, 0x1e04c,
2004 0x1e284, 0x1e290,
2005 0x1e2c0, 0x1e2c0,
2006 0x1e2e0, 0x1e2e0,
2007 0x1e300, 0x1e384,
2008 0x1e3c0, 0x1e3c8,
2009 0x1e408, 0x1e40c,
2010 0x1e440, 0x1e44c,
2011 0x1e684, 0x1e690,
2012 0x1e6c0, 0x1e6c0,
2013 0x1e6e0, 0x1e6e0,
2014 0x1e700, 0x1e784,
2015 0x1e7c0, 0x1e7c8,
2016 0x1e808, 0x1e80c,
2017 0x1e840, 0x1e84c,
2018 0x1ea84, 0x1ea90,
2019 0x1eac0, 0x1eac0,
2020 0x1eae0, 0x1eae0,
2021 0x1eb00, 0x1eb84,
2022 0x1ebc0, 0x1ebc8,
2023 0x1ec08, 0x1ec0c,
2024 0x1ec40, 0x1ec4c,
2025 0x1ee84, 0x1ee90,
2026 0x1eec0, 0x1eec0,
2027 0x1eee0, 0x1eee0,
2028 0x1ef00, 0x1ef84,
2029 0x1efc0, 0x1efc8,
2030 0x1f008, 0x1f00c,
2031 0x1f040, 0x1f04c,
2032 0x1f284, 0x1f290,
2033 0x1f2c0, 0x1f2c0,
2034 0x1f2e0, 0x1f2e0,
2035 0x1f300, 0x1f384,
2036 0x1f3c0, 0x1f3c8,
2037 0x1f408, 0x1f40c,
2038 0x1f440, 0x1f44c,
2039 0x1f684, 0x1f690,
2040 0x1f6c0, 0x1f6c0,
2041 0x1f6e0, 0x1f6e0,
2042 0x1f700, 0x1f784,
2043 0x1f7c0, 0x1f7c8,
2044 0x1f808, 0x1f80c,
2045 0x1f840, 0x1f84c,
2046 0x1fa84, 0x1fa90,
2047 0x1fac0, 0x1fac0,
2048 0x1fae0, 0x1fae0,
2049 0x1fb00, 0x1fb84,
2050 0x1fbc0, 0x1fbc8,
2051 0x1fc08, 0x1fc0c,
2052 0x1fc40, 0x1fc4c,
2053 0x1fe84, 0x1fe90,
2054 0x1fec0, 0x1fec0,
2055 0x1fee0, 0x1fee0,
2056 0x1ff00, 0x1ff84,
2057 0x1ffc0, 0x1ffc8,
2058 0x30000, 0x30030,
2059 0x30100, 0x30144,
2060 0x30190, 0x301d0,
2061 0x30200, 0x30318,
2062 0x30400, 0x3052c,
2063 0x30540, 0x3061c,
2064 0x30800, 0x30834,
2065 0x308c0, 0x30908,
2066 0x30910, 0x309ac,
2067 0x30a00, 0x30a04,
2068 0x30a0c, 0x30a2c,
2069 0x30a44, 0x30a50,
2070 0x30a74, 0x30c24,
2071 0x30d08, 0x30d14,
2072 0x30d1c, 0x30d20,
2073 0x30d3c, 0x30d50,
2074 0x31200, 0x3120c,
2075 0x31220, 0x31220,
2076 0x31240, 0x31240,
2077 0x31600, 0x31600,
2078 0x31608, 0x3160c,
2079 0x31a00, 0x31a1c,
2080 0x31e04, 0x31e20,
2081 0x31e38, 0x31e3c,
2082 0x31e80, 0x31e80,
2083 0x31e88, 0x31ea8,
2084 0x31eb0, 0x31eb4,
2085 0x31ec8, 0x31ed4,
2086 0x31fb8, 0x32004,
2087 0x32208, 0x3223c,
2088 0x32600, 0x32630,
2089 0x32a00, 0x32abc,
2090 0x32b00, 0x32b70,
2091 0x33000, 0x33048,
2092 0x33060, 0x3309c,
2093 0x330f0, 0x33148,
2094 0x33160, 0x3319c,
2095 0x331f0, 0x332e4,
2096 0x332f8, 0x333e4,
2097 0x333f8, 0x33448,
2098 0x33460, 0x3349c,
2099 0x334f0, 0x33548,
2100 0x33560, 0x3359c,
2101 0x335f0, 0x336e4,
2102 0x336f8, 0x337e4,
2103 0x337f8, 0x337fc,
2104 0x33814, 0x33814,
2105 0x3382c, 0x3382c,
2106 0x33880, 0x3388c,
2107 0x338e8, 0x338ec,
2108 0x33900, 0x33948,
2109 0x33960, 0x3399c,
2110 0x339f0, 0x33ae4,
2111 0x33af8, 0x33b10,
2112 0x33b28, 0x33b28,
2113 0x33b3c, 0x33b50,
2114 0x33bf0, 0x33c10,
2115 0x33c28, 0x33c28,
2116 0x33c3c, 0x33c50,
2117 0x33cf0, 0x33cfc,
2118 0x34000, 0x34030,
2119 0x34100, 0x34144,
2120 0x34190, 0x341d0,
2121 0x34200, 0x34318,
2122 0x34400, 0x3452c,
2123 0x34540, 0x3461c,
2124 0x34800, 0x34834,
2125 0x348c0, 0x34908,
2126 0x34910, 0x349ac,
2127 0x34a00, 0x34a04,
2128 0x34a0c, 0x34a2c,
2129 0x34a44, 0x34a50,
2130 0x34a74, 0x34c24,
2131 0x34d08, 0x34d14,
2132 0x34d1c, 0x34d20,
2133 0x34d3c, 0x34d50,
2134 0x35200, 0x3520c,
2135 0x35220, 0x35220,
2136 0x35240, 0x35240,
2137 0x35600, 0x35600,
2138 0x35608, 0x3560c,
2139 0x35a00, 0x35a1c,
2140 0x35e04, 0x35e20,
2141 0x35e38, 0x35e3c,
2142 0x35e80, 0x35e80,
2143 0x35e88, 0x35ea8,
2144 0x35eb0, 0x35eb4,
2145 0x35ec8, 0x35ed4,
2146 0x35fb8, 0x36004,
2147 0x36208, 0x3623c,
2148 0x36600, 0x36630,
2149 0x36a00, 0x36abc,
2150 0x36b00, 0x36b70,
2151 0x37000, 0x37048,
2152 0x37060, 0x3709c,
2153 0x370f0, 0x37148,
2154 0x37160, 0x3719c,
2155 0x371f0, 0x372e4,
2156 0x372f8, 0x373e4,
2157 0x373f8, 0x37448,
2158 0x37460, 0x3749c,
2159 0x374f0, 0x37548,
2160 0x37560, 0x3759c,
2161 0x375f0, 0x376e4,
2162 0x376f8, 0x377e4,
2163 0x377f8, 0x377fc,
2164 0x37814, 0x37814,
2165 0x3782c, 0x3782c,
2166 0x37880, 0x3788c,
2167 0x378e8, 0x378ec,
2168 0x37900, 0x37948,
2169 0x37960, 0x3799c,
2170 0x379f0, 0x37ae4,
2171 0x37af8, 0x37b10,
2172 0x37b28, 0x37b28,
2173 0x37b3c, 0x37b50,
2174 0x37bf0, 0x37c10,
2175 0x37c28, 0x37c28,
2176 0x37c3c, 0x37c50,
2177 0x37cf0, 0x37cfc,
2178 0x38000, 0x38030,
2179 0x38100, 0x38144,
2180 0x38190, 0x381d0,
2181 0x38200, 0x38318,
2182 0x38400, 0x3852c,
2183 0x38540, 0x3861c,
2184 0x38800, 0x38834,
2185 0x388c0, 0x38908,
2186 0x38910, 0x389ac,
2187 0x38a00, 0x38a04,
2188 0x38a0c, 0x38a2c,
2189 0x38a44, 0x38a50,
2190 0x38a74, 0x38c24,
2191 0x38d08, 0x38d14,
2192 0x38d1c, 0x38d20,
2193 0x38d3c, 0x38d50,
2194 0x39200, 0x3920c,
2195 0x39220, 0x39220,
2196 0x39240, 0x39240,
2197 0x39600, 0x39600,
2198 0x39608, 0x3960c,
2199 0x39a00, 0x39a1c,
2200 0x39e04, 0x39e20,
2201 0x39e38, 0x39e3c,
2202 0x39e80, 0x39e80,
2203 0x39e88, 0x39ea8,
2204 0x39eb0, 0x39eb4,
2205 0x39ec8, 0x39ed4,
2206 0x39fb8, 0x3a004,
2207 0x3a208, 0x3a23c,
2208 0x3a600, 0x3a630,
2209 0x3aa00, 0x3aabc,
2210 0x3ab00, 0x3ab70,
2211 0x3b000, 0x3b048,
2212 0x3b060, 0x3b09c,
2213 0x3b0f0, 0x3b148,
2214 0x3b160, 0x3b19c,
2215 0x3b1f0, 0x3b2e4,
2216 0x3b2f8, 0x3b3e4,
2217 0x3b3f8, 0x3b448,
2218 0x3b460, 0x3b49c,
2219 0x3b4f0, 0x3b548,
2220 0x3b560, 0x3b59c,
2221 0x3b5f0, 0x3b6e4,
2222 0x3b6f8, 0x3b7e4,
2223 0x3b7f8, 0x3b7fc,
2224 0x3b814, 0x3b814,
2225 0x3b82c, 0x3b82c,
2226 0x3b880, 0x3b88c,
2227 0x3b8e8, 0x3b8ec,
2228 0x3b900, 0x3b948,
2229 0x3b960, 0x3b99c,
2230 0x3b9f0, 0x3bae4,
2231 0x3baf8, 0x3bb10,
2232 0x3bb28, 0x3bb28,
2233 0x3bb3c, 0x3bb50,
2234 0x3bbf0, 0x3bc10,
2235 0x3bc28, 0x3bc28,
2236 0x3bc3c, 0x3bc50,
2237 0x3bcf0, 0x3bcfc,
2238 0x3c000, 0x3c030,
2239 0x3c100, 0x3c144,
2240 0x3c190, 0x3c1d0,
2241 0x3c200, 0x3c318,
2242 0x3c400, 0x3c52c,
2243 0x3c540, 0x3c61c,
2244 0x3c800, 0x3c834,
2245 0x3c8c0, 0x3c908,
2246 0x3c910, 0x3c9ac,
2247 0x3ca00, 0x3ca04,
2248 0x3ca0c, 0x3ca2c,
2249 0x3ca44, 0x3ca50,
2250 0x3ca74, 0x3cc24,
2251 0x3cd08, 0x3cd14,
2252 0x3cd1c, 0x3cd20,
2253 0x3cd3c, 0x3cd50,
2254 0x3d200, 0x3d20c,
2255 0x3d220, 0x3d220,
2256 0x3d240, 0x3d240,
2257 0x3d600, 0x3d600,
2258 0x3d608, 0x3d60c,
2259 0x3da00, 0x3da1c,
2260 0x3de04, 0x3de20,
2261 0x3de38, 0x3de3c,
2262 0x3de80, 0x3de80,
2263 0x3de88, 0x3dea8,
2264 0x3deb0, 0x3deb4,
2265 0x3dec8, 0x3ded4,
2266 0x3dfb8, 0x3e004,
2267 0x3e208, 0x3e23c,
2268 0x3e600, 0x3e630,
2269 0x3ea00, 0x3eabc,
2270 0x3eb00, 0x3eb70,
2271 0x3f000, 0x3f048,
2272 0x3f060, 0x3f09c,
2273 0x3f0f0, 0x3f148,
2274 0x3f160, 0x3f19c,
2275 0x3f1f0, 0x3f2e4,
2276 0x3f2f8, 0x3f3e4,
2277 0x3f3f8, 0x3f448,
2278 0x3f460, 0x3f49c,
2279 0x3f4f0, 0x3f548,
2280 0x3f560, 0x3f59c,
2281 0x3f5f0, 0x3f6e4,
2282 0x3f6f8, 0x3f7e4,
2283 0x3f7f8, 0x3f7fc,
2284 0x3f814, 0x3f814,
2285 0x3f82c, 0x3f82c,
2286 0x3f880, 0x3f88c,
2287 0x3f8e8, 0x3f8ec,
2288 0x3f900, 0x3f948,
2289 0x3f960, 0x3f99c,
2290 0x3f9f0, 0x3fae4,
2291 0x3faf8, 0x3fb10,
2292 0x3fb28, 0x3fb28,
2293 0x3fb3c, 0x3fb50,
2294 0x3fbf0, 0x3fc10,
2295 0x3fc28, 0x3fc28,
2296 0x3fc3c, 0x3fc50,
2297 0x3fcf0, 0x3fcfc,
2298 0x40000, 0x4000c,
2299 0x40040, 0x40068,
2300 0x40080, 0x40144,
2301 0x40180, 0x4018c,
2302 0x40200, 0x40298,
2303 0x402ac, 0x4033c,
2304 0x403f8, 0x403fc,
c1f49e3e 2305 0x41304, 0x413c4,
251f9e88
SR
2306 0x41400, 0x4141c,
2307 0x41480, 0x414d0,
2308 0x44000, 0x44078,
2309 0x440c0, 0x44278,
2310 0x442c0, 0x44478,
2311 0x444c0, 0x44678,
2312 0x446c0, 0x44878,
2313 0x448c0, 0x449fc,
2314 0x45000, 0x45068,
2315 0x45080, 0x45084,
2316 0x450a0, 0x450b0,
2317 0x45200, 0x45268,
2318 0x45280, 0x45284,
2319 0x452a0, 0x452b0,
2320 0x460c0, 0x460e4,
2321 0x47000, 0x4708c,
2322 0x47200, 0x47250,
2323 0x47400, 0x47420,
2324 0x47600, 0x47618,
2325 0x47800, 0x47814,
2326 0x48000, 0x4800c,
2327 0x48040, 0x48068,
2328 0x48080, 0x48144,
2329 0x48180, 0x4818c,
2330 0x48200, 0x48298,
2331 0x482ac, 0x4833c,
2332 0x483f8, 0x483fc,
c1f49e3e 2333 0x49304, 0x493c4,
251f9e88
SR
2334 0x49400, 0x4941c,
2335 0x49480, 0x494d0,
2336 0x4c000, 0x4c078,
2337 0x4c0c0, 0x4c278,
2338 0x4c2c0, 0x4c478,
2339 0x4c4c0, 0x4c678,
2340 0x4c6c0, 0x4c878,
2341 0x4c8c0, 0x4c9fc,
2342 0x4d000, 0x4d068,
2343 0x4d080, 0x4d084,
2344 0x4d0a0, 0x4d0b0,
2345 0x4d200, 0x4d268,
2346 0x4d280, 0x4d284,
2347 0x4d2a0, 0x4d2b0,
2348 0x4e0c0, 0x4e0e4,
2349 0x4f000, 0x4f08c,
2350 0x4f200, 0x4f250,
2351 0x4f400, 0x4f420,
2352 0x4f600, 0x4f618,
2353 0x4f800, 0x4f814,
2354 0x50000, 0x500cc,
2355 0x50400, 0x50400,
2356 0x50800, 0x508cc,
2357 0x50c00, 0x50c00,
2358 0x51000, 0x5101c,
2359 0x51300, 0x51308,
2360 };
2361
b8ff05a9
DM
2362 int i;
2363 struct adapter *ap = netdev2adap(dev);
251f9e88
SR
2364 static const unsigned int *reg_ranges;
2365 int arr_size = 0, buf_size = 0;
2366
d14807dd 2367 if (is_t4(ap->params.chip)) {
251f9e88
SR
2368 reg_ranges = &t4_reg_ranges[0];
2369 arr_size = ARRAY_SIZE(t4_reg_ranges);
2370 buf_size = T4_REGMAP_SIZE;
2371 } else {
2372 reg_ranges = &t5_reg_ranges[0];
2373 arr_size = ARRAY_SIZE(t5_reg_ranges);
2374 buf_size = T5_REGMAP_SIZE;
2375 }
b8ff05a9
DM
2376
2377 regs->version = mk_adap_vers(ap);
2378
251f9e88
SR
2379 memset(buf, 0, buf_size);
2380 for (i = 0; i < arr_size; i += 2)
b8ff05a9
DM
2381 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2382}
2383
2384static int restart_autoneg(struct net_device *dev)
2385{
2386 struct port_info *p = netdev_priv(dev);
2387
2388 if (!netif_running(dev))
2389 return -EAGAIN;
2390 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2391 return -EINVAL;
060e0c75 2392 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
b8ff05a9
DM
2393 return 0;
2394}
2395
c5e06360
DM
2396static int identify_port(struct net_device *dev,
2397 enum ethtool_phys_id_state state)
b8ff05a9 2398{
c5e06360 2399 unsigned int val;
060e0c75
DM
2400 struct adapter *adap = netdev2adap(dev);
2401
c5e06360
DM
2402 if (state == ETHTOOL_ID_ACTIVE)
2403 val = 0xffff;
2404 else if (state == ETHTOOL_ID_INACTIVE)
2405 val = 0;
2406 else
2407 return -EINVAL;
b8ff05a9 2408
c5e06360 2409 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
b8ff05a9
DM
2410}
2411
2412static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2413{
2414 unsigned int v = 0;
2415
a0881cab
DM
2416 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2417 type == FW_PORT_TYPE_BT_XAUI) {
b8ff05a9
DM
2418 v |= SUPPORTED_TP;
2419 if (caps & FW_PORT_CAP_SPEED_100M)
2420 v |= SUPPORTED_100baseT_Full;
2421 if (caps & FW_PORT_CAP_SPEED_1G)
2422 v |= SUPPORTED_1000baseT_Full;
2423 if (caps & FW_PORT_CAP_SPEED_10G)
2424 v |= SUPPORTED_10000baseT_Full;
2425 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2426 v |= SUPPORTED_Backplane;
2427 if (caps & FW_PORT_CAP_SPEED_1G)
2428 v |= SUPPORTED_1000baseKX_Full;
2429 if (caps & FW_PORT_CAP_SPEED_10G)
2430 v |= SUPPORTED_10000baseKX4_Full;
2431 } else if (type == FW_PORT_TYPE_KR)
2432 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
a0881cab 2433 else if (type == FW_PORT_TYPE_BP_AP)
7d5e77aa
DM
2434 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2435 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2436 else if (type == FW_PORT_TYPE_BP4_AP)
2437 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2438 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2439 SUPPORTED_10000baseKX4_Full;
a0881cab
DM
2440 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2441 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
b8ff05a9 2442 v |= SUPPORTED_FIBRE;
72aca4bf
KS
2443 else if (type == FW_PORT_TYPE_BP40_BA)
2444 v |= SUPPORTED_40000baseSR4_Full;
b8ff05a9
DM
2445
2446 if (caps & FW_PORT_CAP_ANEG)
2447 v |= SUPPORTED_Autoneg;
2448 return v;
2449}
2450
2451static unsigned int to_fw_linkcaps(unsigned int caps)
2452{
2453 unsigned int v = 0;
2454
2455 if (caps & ADVERTISED_100baseT_Full)
2456 v |= FW_PORT_CAP_SPEED_100M;
2457 if (caps & ADVERTISED_1000baseT_Full)
2458 v |= FW_PORT_CAP_SPEED_1G;
2459 if (caps & ADVERTISED_10000baseT_Full)
2460 v |= FW_PORT_CAP_SPEED_10G;
72aca4bf
KS
2461 if (caps & ADVERTISED_40000baseSR4_Full)
2462 v |= FW_PORT_CAP_SPEED_40G;
b8ff05a9
DM
2463 return v;
2464}
2465
2466static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2467{
2468 const struct port_info *p = netdev_priv(dev);
2469
2470 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
a0881cab 2471 p->port_type == FW_PORT_TYPE_BT_XFI ||
b8ff05a9
DM
2472 p->port_type == FW_PORT_TYPE_BT_XAUI)
2473 cmd->port = PORT_TP;
a0881cab
DM
2474 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2475 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
b8ff05a9 2476 cmd->port = PORT_FIBRE;
3e00a509
HS
2477 else if (p->port_type == FW_PORT_TYPE_SFP ||
2478 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2479 p->port_type == FW_PORT_TYPE_QSFP) {
2480 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2481 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2482 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2483 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2484 cmd->port = PORT_FIBRE;
2485 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2486 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
a0881cab
DM
2487 cmd->port = PORT_DA;
2488 else
3e00a509 2489 cmd->port = PORT_OTHER;
a0881cab 2490 } else
b8ff05a9
DM
2491 cmd->port = PORT_OTHER;
2492
2493 if (p->mdio_addr >= 0) {
2494 cmd->phy_address = p->mdio_addr;
2495 cmd->transceiver = XCVR_EXTERNAL;
2496 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2497 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2498 } else {
2499 cmd->phy_address = 0; /* not really, but no better option */
2500 cmd->transceiver = XCVR_INTERNAL;
2501 cmd->mdio_support = 0;
2502 }
2503
2504 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2505 cmd->advertising = from_fw_linkcaps(p->port_type,
2506 p->link_cfg.advertising);
70739497
DD
2507 ethtool_cmd_speed_set(cmd,
2508 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
b8ff05a9
DM
2509 cmd->duplex = DUPLEX_FULL;
2510 cmd->autoneg = p->link_cfg.autoneg;
2511 cmd->maxtxpkt = 0;
2512 cmd->maxrxpkt = 0;
2513 return 0;
2514}
2515
2516static unsigned int speed_to_caps(int speed)
2517{
e8b39015 2518 if (speed == 100)
b8ff05a9 2519 return FW_PORT_CAP_SPEED_100M;
e8b39015 2520 if (speed == 1000)
b8ff05a9 2521 return FW_PORT_CAP_SPEED_1G;
e8b39015 2522 if (speed == 10000)
b8ff05a9 2523 return FW_PORT_CAP_SPEED_10G;
e8b39015 2524 if (speed == 40000)
72aca4bf 2525 return FW_PORT_CAP_SPEED_40G;
b8ff05a9
DM
2526 return 0;
2527}
2528
2529static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2530{
2531 unsigned int cap;
2532 struct port_info *p = netdev_priv(dev);
2533 struct link_config *lc = &p->link_cfg;
25db0338 2534 u32 speed = ethtool_cmd_speed(cmd);
b8ff05a9
DM
2535
2536 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2537 return -EINVAL;
2538
2539 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2540 /*
2541 * PHY offers a single speed. See if that's what's
2542 * being requested.
2543 */
2544 if (cmd->autoneg == AUTONEG_DISABLE &&
25db0338
DD
2545 (lc->supported & speed_to_caps(speed)))
2546 return 0;
b8ff05a9
DM
2547 return -EINVAL;
2548 }
2549
2550 if (cmd->autoneg == AUTONEG_DISABLE) {
25db0338 2551 cap = speed_to_caps(speed);
b8ff05a9 2552
72aca4bf 2553 if (!(lc->supported & cap) ||
e8b39015
BH
2554 (speed == 1000) ||
2555 (speed == 10000) ||
72aca4bf 2556 (speed == 40000))
b8ff05a9
DM
2557 return -EINVAL;
2558 lc->requested_speed = cap;
2559 lc->advertising = 0;
2560 } else {
2561 cap = to_fw_linkcaps(cmd->advertising);
2562 if (!(lc->supported & cap))
2563 return -EINVAL;
2564 lc->requested_speed = 0;
2565 lc->advertising = cap | FW_PORT_CAP_ANEG;
2566 }
2567 lc->autoneg = cmd->autoneg;
2568
2569 if (netif_running(dev))
060e0c75
DM
2570 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2571 lc);
b8ff05a9
DM
2572 return 0;
2573}
2574
2575static void get_pauseparam(struct net_device *dev,
2576 struct ethtool_pauseparam *epause)
2577{
2578 struct port_info *p = netdev_priv(dev);
2579
2580 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2581 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2582 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2583}
2584
2585static int set_pauseparam(struct net_device *dev,
2586 struct ethtool_pauseparam *epause)
2587{
2588 struct port_info *p = netdev_priv(dev);
2589 struct link_config *lc = &p->link_cfg;
2590
2591 if (epause->autoneg == AUTONEG_DISABLE)
2592 lc->requested_fc = 0;
2593 else if (lc->supported & FW_PORT_CAP_ANEG)
2594 lc->requested_fc = PAUSE_AUTONEG;
2595 else
2596 return -EINVAL;
2597
2598 if (epause->rx_pause)
2599 lc->requested_fc |= PAUSE_RX;
2600 if (epause->tx_pause)
2601 lc->requested_fc |= PAUSE_TX;
2602 if (netif_running(dev))
060e0c75
DM
2603 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2604 lc);
b8ff05a9
DM
2605 return 0;
2606}
2607
b8ff05a9
DM
2608static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2609{
2610 const struct port_info *pi = netdev_priv(dev);
2611 const struct sge *s = &pi->adapter->sge;
2612
2613 e->rx_max_pending = MAX_RX_BUFFERS;
2614 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2615 e->rx_jumbo_max_pending = 0;
2616 e->tx_max_pending = MAX_TXQ_ENTRIES;
2617
2618 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2619 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2620 e->rx_jumbo_pending = 0;
2621 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2622}
2623
2624static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2625{
2626 int i;
2627 const struct port_info *pi = netdev_priv(dev);
2628 struct adapter *adapter = pi->adapter;
2629 struct sge *s = &adapter->sge;
2630
2631 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2632 e->tx_pending > MAX_TXQ_ENTRIES ||
2633 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2634 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2635 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2636 return -EINVAL;
2637
2638 if (adapter->flags & FULL_INIT_DONE)
2639 return -EBUSY;
2640
2641 for (i = 0; i < pi->nqsets; ++i) {
2642 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2643 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2644 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2645 }
2646 return 0;
2647}
2648
2649static int closest_timer(const struct sge *s, int time)
2650{
2651 int i, delta, match = 0, min_delta = INT_MAX;
2652
2653 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2654 delta = time - s->timer_val[i];
2655 if (delta < 0)
2656 delta = -delta;
2657 if (delta < min_delta) {
2658 min_delta = delta;
2659 match = i;
2660 }
2661 }
2662 return match;
2663}
2664
2665static int closest_thres(const struct sge *s, int thres)
2666{
2667 int i, delta, match = 0, min_delta = INT_MAX;
2668
2669 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2670 delta = thres - s->counter_val[i];
2671 if (delta < 0)
2672 delta = -delta;
2673 if (delta < min_delta) {
2674 min_delta = delta;
2675 match = i;
2676 }
2677 }
2678 return match;
2679}
2680
2681/*
2682 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2683 */
2684static unsigned int qtimer_val(const struct adapter *adap,
2685 const struct sge_rspq *q)
2686{
2687 unsigned int idx = q->intr_params >> 1;
2688
2689 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2690}
2691
2692/**
c887ad0e 2693 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
b8ff05a9
DM
2694 * @q: the Rx queue
2695 * @us: the hold-off time in us, or 0 to disable timer
2696 * @cnt: the hold-off packet count, or 0 to disable counter
2697 *
2698 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2699 * one of the two needs to be enabled for the queue to generate interrupts.
2700 */
c887ad0e
HS
2701static int set_rspq_intr_params(struct sge_rspq *q,
2702 unsigned int us, unsigned int cnt)
b8ff05a9 2703{
c887ad0e
HS
2704 struct adapter *adap = q->adap;
2705
b8ff05a9
DM
2706 if ((us | cnt) == 0)
2707 cnt = 1;
2708
2709 if (cnt) {
2710 int err;
2711 u32 v, new_idx;
2712
2713 new_idx = closest_thres(&adap->sge, cnt);
2714 if (q->desc && q->pktcnt_idx != new_idx) {
2715 /* the queue has already been created, update it */
2716 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2717 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2718 FW_PARAMS_PARAM_YZ(q->cntxt_id);
060e0c75
DM
2719 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2720 &new_idx);
b8ff05a9
DM
2721 if (err)
2722 return err;
2723 }
2724 q->pktcnt_idx = new_idx;
2725 }
2726
2727 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2728 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2729 return 0;
2730}
2731
c887ad0e
HS
2732/**
2733 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
2734 * @dev: the network device
2735 * @us: the hold-off time in us, or 0 to disable timer
2736 * @cnt: the hold-off packet count, or 0 to disable counter
2737 *
2738 * Set the RX interrupt hold-off parameters for a network device.
2739 */
2740static int set_rx_intr_params(struct net_device *dev,
2741 unsigned int us, unsigned int cnt)
b8ff05a9 2742{
c887ad0e
HS
2743 int i, err;
2744 struct port_info *pi = netdev_priv(dev);
b8ff05a9 2745 struct adapter *adap = pi->adapter;
c887ad0e
HS
2746 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2747
2748 for (i = 0; i < pi->nqsets; i++, q++) {
2749 err = set_rspq_intr_params(&q->rspq, us, cnt);
2750 if (err)
2751 return err;
d4fc9dc2 2752 }
c887ad0e
HS
2753 return 0;
2754}
2755
2756static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2757{
2758 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2759 c->rx_max_coalesced_frames);
b8ff05a9
DM
2760}
2761
2762static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2763{
2764 const struct port_info *pi = netdev_priv(dev);
2765 const struct adapter *adap = pi->adapter;
2766 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2767
2768 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2769 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2770 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2771 return 0;
2772}
2773
1478b3ee
DM
2774/**
2775 * eeprom_ptov - translate a physical EEPROM address to virtual
2776 * @phys_addr: the physical EEPROM address
2777 * @fn: the PCI function number
2778 * @sz: size of function-specific area
2779 *
2780 * Translate a physical EEPROM address to virtual. The first 1K is
2781 * accessed through virtual addresses starting at 31K, the rest is
2782 * accessed through virtual addresses starting at 0.
2783 *
2784 * The mapping is as follows:
2785 * [0..1K) -> [31K..32K)
2786 * [1K..1K+A) -> [31K-A..31K)
2787 * [1K+A..ES) -> [0..ES-A-1K)
2788 *
2789 * where A = @fn * @sz, and ES = EEPROM size.
b8ff05a9 2790 */
1478b3ee 2791static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
b8ff05a9 2792{
1478b3ee 2793 fn *= sz;
b8ff05a9
DM
2794 if (phys_addr < 1024)
2795 return phys_addr + (31 << 10);
1478b3ee
DM
2796 if (phys_addr < 1024 + fn)
2797 return 31744 - fn + phys_addr - 1024;
b8ff05a9 2798 if (phys_addr < EEPROMSIZE)
1478b3ee 2799 return phys_addr - 1024 - fn;
b8ff05a9
DM
2800 return -EINVAL;
2801}
2802
2803/*
2804 * The next two routines implement eeprom read/write from physical addresses.
b8ff05a9
DM
2805 */
2806static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2807{
1478b3ee 2808 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2809
2810 if (vaddr >= 0)
2811 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2812 return vaddr < 0 ? vaddr : 0;
2813}
2814
2815static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2816{
1478b3ee 2817 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2818
2819 if (vaddr >= 0)
2820 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2821 return vaddr < 0 ? vaddr : 0;
2822}
2823
2824#define EEPROM_MAGIC 0x38E2F10C
2825
2826static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2827 u8 *data)
2828{
2829 int i, err = 0;
2830 struct adapter *adapter = netdev2adap(dev);
2831
2832 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2833 if (!buf)
2834 return -ENOMEM;
2835
2836 e->magic = EEPROM_MAGIC;
2837 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2838 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2839
2840 if (!err)
2841 memcpy(data, buf + e->offset, e->len);
2842 kfree(buf);
2843 return err;
2844}
2845
2846static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2847 u8 *data)
2848{
2849 u8 *buf;
2850 int err = 0;
2851 u32 aligned_offset, aligned_len, *p;
2852 struct adapter *adapter = netdev2adap(dev);
2853
2854 if (eeprom->magic != EEPROM_MAGIC)
2855 return -EINVAL;
2856
2857 aligned_offset = eeprom->offset & ~3;
2858 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2859
1478b3ee
DM
2860 if (adapter->fn > 0) {
2861 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2862
2863 if (aligned_offset < start ||
2864 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2865 return -EPERM;
2866 }
2867
b8ff05a9
DM
2868 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2869 /*
2870 * RMW possibly needed for first or last words.
2871 */
2872 buf = kmalloc(aligned_len, GFP_KERNEL);
2873 if (!buf)
2874 return -ENOMEM;
2875 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2876 if (!err && aligned_len > 4)
2877 err = eeprom_rd_phys(adapter,
2878 aligned_offset + aligned_len - 4,
2879 (u32 *)&buf[aligned_len - 4]);
2880 if (err)
2881 goto out;
2882 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2883 } else
2884 buf = data;
2885
2886 err = t4_seeprom_wp(adapter, false);
2887 if (err)
2888 goto out;
2889
2890 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2891 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2892 aligned_offset += 4;
2893 }
2894
2895 if (!err)
2896 err = t4_seeprom_wp(adapter, true);
2897out:
2898 if (buf != data)
2899 kfree(buf);
2900 return err;
2901}
2902
2903static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2904{
2905 int ret;
2906 const struct firmware *fw;
2907 struct adapter *adap = netdev2adap(netdev);
2908
2909 ef->data[sizeof(ef->data) - 1] = '\0';
2910 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2911 if (ret < 0)
2912 return ret;
2913
2914 ret = t4_load_fw(adap, fw->data, fw->size);
2915 release_firmware(fw);
2916 if (!ret)
2917 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2918 return ret;
2919}
2920
2921#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2922#define BCAST_CRC 0xa0ccc1a6
2923
2924static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2925{
2926 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2927 wol->wolopts = netdev2adap(dev)->wol;
2928 memset(&wol->sopass, 0, sizeof(wol->sopass));
2929}
2930
2931static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2932{
2933 int err = 0;
2934 struct port_info *pi = netdev_priv(dev);
2935
2936 if (wol->wolopts & ~WOL_SUPPORTED)
2937 return -EINVAL;
2938 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2939 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2940 if (wol->wolopts & WAKE_BCAST) {
2941 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2942 ~0ULL, 0, false);
2943 if (!err)
2944 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2945 ~6ULL, ~0ULL, BCAST_CRC, true);
2946 } else
2947 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2948 return err;
2949}
2950
c8f44aff 2951static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
87b6cf51 2952{
2ed28baa 2953 const struct port_info *pi = netdev_priv(dev);
c8f44aff 2954 netdev_features_t changed = dev->features ^ features;
19ecae2c 2955 int err;
19ecae2c 2956
f646968f 2957 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2ed28baa 2958 return 0;
19ecae2c 2959
2ed28baa
MM
2960 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2961 -1, -1, -1,
f646968f 2962 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2ed28baa 2963 if (unlikely(err))
f646968f 2964 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
19ecae2c 2965 return err;
87b6cf51
DM
2966}
2967
7850f63f 2968static u32 get_rss_table_size(struct net_device *dev)
671b0060
DM
2969{
2970 const struct port_info *pi = netdev_priv(dev);
671b0060 2971
7850f63f
BH
2972 return pi->rss_size;
2973}
2974
fe62d001 2975static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
7850f63f
BH
2976{
2977 const struct port_info *pi = netdev_priv(dev);
2978 unsigned int n = pi->rss_size;
2979
671b0060 2980 while (n--)
7850f63f 2981 p[n] = pi->rss[n];
671b0060
DM
2982 return 0;
2983}
2984
fe62d001 2985static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
671b0060
DM
2986{
2987 unsigned int i;
2988 struct port_info *pi = netdev_priv(dev);
2989
7850f63f
BH
2990 for (i = 0; i < pi->rss_size; i++)
2991 pi->rss[i] = p[i];
671b0060
DM
2992 if (pi->adapter->flags & FULL_INIT_DONE)
2993 return write_rss(pi, pi->rss);
2994 return 0;
2995}
2996
2997static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 2998 u32 *rules)
671b0060 2999{
f796564a
DM
3000 const struct port_info *pi = netdev_priv(dev);
3001
671b0060 3002 switch (info->cmd) {
f796564a
DM
3003 case ETHTOOL_GRXFH: {
3004 unsigned int v = pi->rss_mode;
3005
3006 info->data = 0;
3007 switch (info->flow_type) {
3008 case TCP_V4_FLOW:
3009 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3010 info->data = RXH_IP_SRC | RXH_IP_DST |
3011 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3012 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3013 info->data = RXH_IP_SRC | RXH_IP_DST;
3014 break;
3015 case UDP_V4_FLOW:
3016 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3017 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3018 info->data = RXH_IP_SRC | RXH_IP_DST |
3019 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3020 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3021 info->data = RXH_IP_SRC | RXH_IP_DST;
3022 break;
3023 case SCTP_V4_FLOW:
3024 case AH_ESP_V4_FLOW:
3025 case IPV4_FLOW:
3026 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3027 info->data = RXH_IP_SRC | RXH_IP_DST;
3028 break;
3029 case TCP_V6_FLOW:
3030 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3031 info->data = RXH_IP_SRC | RXH_IP_DST |
3032 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3033 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3034 info->data = RXH_IP_SRC | RXH_IP_DST;
3035 break;
3036 case UDP_V6_FLOW:
3037 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3038 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3039 info->data = RXH_IP_SRC | RXH_IP_DST |
3040 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3041 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3042 info->data = RXH_IP_SRC | RXH_IP_DST;
3043 break;
3044 case SCTP_V6_FLOW:
3045 case AH_ESP_V6_FLOW:
3046 case IPV6_FLOW:
3047 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3048 info->data = RXH_IP_SRC | RXH_IP_DST;
3049 break;
3050 }
3051 return 0;
3052 }
671b0060 3053 case ETHTOOL_GRXRINGS:
f796564a 3054 info->data = pi->nqsets;
671b0060
DM
3055 return 0;
3056 }
3057 return -EOPNOTSUPP;
3058}
3059
9b07be4b 3060static const struct ethtool_ops cxgb_ethtool_ops = {
b8ff05a9
DM
3061 .get_settings = get_settings,
3062 .set_settings = set_settings,
3063 .get_drvinfo = get_drvinfo,
3064 .get_msglevel = get_msglevel,
3065 .set_msglevel = set_msglevel,
3066 .get_ringparam = get_sge_param,
3067 .set_ringparam = set_sge_param,
3068 .get_coalesce = get_coalesce,
3069 .set_coalesce = set_coalesce,
3070 .get_eeprom_len = get_eeprom_len,
3071 .get_eeprom = get_eeprom,
3072 .set_eeprom = set_eeprom,
3073 .get_pauseparam = get_pauseparam,
3074 .set_pauseparam = set_pauseparam,
b8ff05a9
DM
3075 .get_link = ethtool_op_get_link,
3076 .get_strings = get_strings,
c5e06360 3077 .set_phys_id = identify_port,
b8ff05a9
DM
3078 .nway_reset = restart_autoneg,
3079 .get_sset_count = get_sset_count,
3080 .get_ethtool_stats = get_stats,
3081 .get_regs_len = get_regs_len,
3082 .get_regs = get_regs,
3083 .get_wol = get_wol,
3084 .set_wol = set_wol,
671b0060 3085 .get_rxnfc = get_rxnfc,
7850f63f 3086 .get_rxfh_indir_size = get_rss_table_size,
fe62d001
BH
3087 .get_rxfh = get_rss_table,
3088 .set_rxfh = set_rss_table,
b8ff05a9
DM
3089 .flash_device = set_flash,
3090};
3091
3092/*
3093 * debugfs support
3094 */
b8ff05a9
DM
3095static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3096 loff_t *ppos)
3097{
3098 loff_t pos = *ppos;
496ad9aa 3099 loff_t avail = file_inode(file)->i_size;
b8ff05a9
DM
3100 unsigned int mem = (uintptr_t)file->private_data & 3;
3101 struct adapter *adap = file->private_data - mem;
fc5ab020
HS
3102 __be32 *data;
3103 int ret;
b8ff05a9
DM
3104
3105 if (pos < 0)
3106 return -EINVAL;
3107 if (pos >= avail)
3108 return 0;
3109 if (count > avail - pos)
3110 count = avail - pos;
3111
fc5ab020
HS
3112 data = t4_alloc_mem(count);
3113 if (!data)
3114 return -ENOMEM;
b8ff05a9 3115
fc5ab020
HS
3116 spin_lock(&adap->win0_lock);
3117 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3118 spin_unlock(&adap->win0_lock);
3119 if (ret) {
3120 t4_free_mem(data);
3121 return ret;
3122 }
3123 ret = copy_to_user(buf, data, count);
b8ff05a9 3124
fc5ab020
HS
3125 t4_free_mem(data);
3126 if (ret)
3127 return -EFAULT;
b8ff05a9 3128
fc5ab020 3129 *ppos = pos + count;
b8ff05a9
DM
3130 return count;
3131}
3132
3133static const struct file_operations mem_debugfs_fops = {
3134 .owner = THIS_MODULE,
234e3405 3135 .open = simple_open,
b8ff05a9 3136 .read = mem_read,
6038f373 3137 .llseek = default_llseek,
b8ff05a9
DM
3138};
3139
91744948 3140static void add_debugfs_mem(struct adapter *adap, const char *name,
1dd06ae8 3141 unsigned int idx, unsigned int size_mb)
b8ff05a9
DM
3142{
3143 struct dentry *de;
3144
3145 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3146 (void *)adap + idx, &mem_debugfs_fops);
3147 if (de && de->d_inode)
3148 de->d_inode->i_size = size_mb << 20;
3149}
3150
91744948 3151static int setup_debugfs(struct adapter *adap)
b8ff05a9
DM
3152{
3153 int i;
19dd37ba 3154 u32 size;
b8ff05a9
DM
3155
3156 if (IS_ERR_OR_NULL(adap->debugfs_root))
3157 return -1;
3158
3159 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
19dd37ba
SR
3160 if (i & EDRAM0_ENABLE) {
3161 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3162 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3163 }
3164 if (i & EDRAM1_ENABLE) {
3165 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3166 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3167 }
d14807dd 3168 if (is_t4(adap->params.chip)) {
19dd37ba
SR
3169 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3170 if (i & EXT_MEM_ENABLE)
3171 add_debugfs_mem(adap, "mc", MEM_MC,
3172 EXT_MEM_SIZE_GET(size));
3173 } else {
3174 if (i & EXT_MEM_ENABLE) {
3175 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3176 add_debugfs_mem(adap, "mc0", MEM_MC0,
3177 EXT_MEM_SIZE_GET(size));
3178 }
3179 if (i & EXT_MEM1_ENABLE) {
3180 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3181 add_debugfs_mem(adap, "mc1", MEM_MC1,
3182 EXT_MEM_SIZE_GET(size));
3183 }
3184 }
b8ff05a9
DM
3185 if (adap->l2t)
3186 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3187 &t4_l2t_fops);
3188 return 0;
3189}
3190
3191/*
3192 * upper-layer driver support
3193 */
3194
3195/*
3196 * Allocate an active-open TID and set it to the supplied value.
3197 */
3198int cxgb4_alloc_atid(struct tid_info *t, void *data)
3199{
3200 int atid = -1;
3201
3202 spin_lock_bh(&t->atid_lock);
3203 if (t->afree) {
3204 union aopen_entry *p = t->afree;
3205
f2b7e78d 3206 atid = (p - t->atid_tab) + t->atid_base;
b8ff05a9
DM
3207 t->afree = p->next;
3208 p->data = data;
3209 t->atids_in_use++;
3210 }
3211 spin_unlock_bh(&t->atid_lock);
3212 return atid;
3213}
3214EXPORT_SYMBOL(cxgb4_alloc_atid);
3215
3216/*
3217 * Release an active-open TID.
3218 */
3219void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3220{
f2b7e78d 3221 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
b8ff05a9
DM
3222
3223 spin_lock_bh(&t->atid_lock);
3224 p->next = t->afree;
3225 t->afree = p;
3226 t->atids_in_use--;
3227 spin_unlock_bh(&t->atid_lock);
3228}
3229EXPORT_SYMBOL(cxgb4_free_atid);
3230
3231/*
3232 * Allocate a server TID and set it to the supplied value.
3233 */
3234int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3235{
3236 int stid;
3237
3238 spin_lock_bh(&t->stid_lock);
3239 if (family == PF_INET) {
3240 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3241 if (stid < t->nstids)
3242 __set_bit(stid, t->stid_bmap);
3243 else
3244 stid = -1;
3245 } else {
3246 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3247 if (stid < 0)
3248 stid = -1;
3249 }
3250 if (stid >= 0) {
3251 t->stid_tab[stid].data = data;
3252 stid += t->stid_base;
15f63b74
KS
3253 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3254 * This is equivalent to 4 TIDs. With CLIP enabled it
3255 * needs 2 TIDs.
3256 */
3257 if (family == PF_INET)
3258 t->stids_in_use++;
3259 else
3260 t->stids_in_use += 4;
b8ff05a9
DM
3261 }
3262 spin_unlock_bh(&t->stid_lock);
3263 return stid;
3264}
3265EXPORT_SYMBOL(cxgb4_alloc_stid);
3266
dca4faeb
VP
3267/* Allocate a server filter TID and set it to the supplied value.
3268 */
3269int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3270{
3271 int stid;
3272
3273 spin_lock_bh(&t->stid_lock);
3274 if (family == PF_INET) {
3275 stid = find_next_zero_bit(t->stid_bmap,
3276 t->nstids + t->nsftids, t->nstids);
3277 if (stid < (t->nstids + t->nsftids))
3278 __set_bit(stid, t->stid_bmap);
3279 else
3280 stid = -1;
3281 } else {
3282 stid = -1;
3283 }
3284 if (stid >= 0) {
3285 t->stid_tab[stid].data = data;
470c60c4
KS
3286 stid -= t->nstids;
3287 stid += t->sftid_base;
dca4faeb
VP
3288 t->stids_in_use++;
3289 }
3290 spin_unlock_bh(&t->stid_lock);
3291 return stid;
3292}
3293EXPORT_SYMBOL(cxgb4_alloc_sftid);
3294
3295/* Release a server TID.
b8ff05a9
DM
3296 */
3297void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3298{
470c60c4
KS
3299 /* Is it a server filter TID? */
3300 if (t->nsftids && (stid >= t->sftid_base)) {
3301 stid -= t->sftid_base;
3302 stid += t->nstids;
3303 } else {
3304 stid -= t->stid_base;
3305 }
3306
b8ff05a9
DM
3307 spin_lock_bh(&t->stid_lock);
3308 if (family == PF_INET)
3309 __clear_bit(stid, t->stid_bmap);
3310 else
3311 bitmap_release_region(t->stid_bmap, stid, 2);
3312 t->stid_tab[stid].data = NULL;
15f63b74
KS
3313 if (family == PF_INET)
3314 t->stids_in_use--;
3315 else
3316 t->stids_in_use -= 4;
b8ff05a9
DM
3317 spin_unlock_bh(&t->stid_lock);
3318}
3319EXPORT_SYMBOL(cxgb4_free_stid);
3320
3321/*
3322 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3323 */
3324static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3325 unsigned int tid)
3326{
3327 struct cpl_tid_release *req;
3328
3329 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3330 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3331 INIT_TP_WR(req, tid);
3332 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3333}
3334
3335/*
3336 * Queue a TID release request and if necessary schedule a work queue to
3337 * process it.
3338 */
31b9c19b 3339static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3340 unsigned int tid)
b8ff05a9
DM
3341{
3342 void **p = &t->tid_tab[tid];
3343 struct adapter *adap = container_of(t, struct adapter, tids);
3344
3345 spin_lock_bh(&adap->tid_release_lock);
3346 *p = adap->tid_release_head;
3347 /* Low 2 bits encode the Tx channel number */
3348 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3349 if (!adap->tid_release_task_busy) {
3350 adap->tid_release_task_busy = true;
29aaee65 3351 queue_work(adap->workq, &adap->tid_release_task);
b8ff05a9
DM
3352 }
3353 spin_unlock_bh(&adap->tid_release_lock);
3354}
b8ff05a9
DM
3355
3356/*
3357 * Process the list of pending TID release requests.
3358 */
3359static void process_tid_release_list(struct work_struct *work)
3360{
3361 struct sk_buff *skb;
3362 struct adapter *adap;
3363
3364 adap = container_of(work, struct adapter, tid_release_task);
3365
3366 spin_lock_bh(&adap->tid_release_lock);
3367 while (adap->tid_release_head) {
3368 void **p = adap->tid_release_head;
3369 unsigned int chan = (uintptr_t)p & 3;
3370 p = (void *)p - chan;
3371
3372 adap->tid_release_head = *p;
3373 *p = NULL;
3374 spin_unlock_bh(&adap->tid_release_lock);
3375
3376 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3377 GFP_KERNEL)))
3378 schedule_timeout_uninterruptible(1);
3379
3380 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3381 t4_ofld_send(adap, skb);
3382 spin_lock_bh(&adap->tid_release_lock);
3383 }
3384 adap->tid_release_task_busy = false;
3385 spin_unlock_bh(&adap->tid_release_lock);
3386}
3387
3388/*
3389 * Release a TID and inform HW. If we are unable to allocate the release
3390 * message we defer to a work queue.
3391 */
3392void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3393{
3394 void *old;
3395 struct sk_buff *skb;
3396 struct adapter *adap = container_of(t, struct adapter, tids);
3397
3398 old = t->tid_tab[tid];
3399 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3400 if (likely(skb)) {
3401 t->tid_tab[tid] = NULL;
3402 mk_tid_release(skb, chan, tid);
3403 t4_ofld_send(adap, skb);
3404 } else
3405 cxgb4_queue_tid_release(t, chan, tid);
3406 if (old)
3407 atomic_dec(&t->tids_in_use);
3408}
3409EXPORT_SYMBOL(cxgb4_remove_tid);
3410
3411/*
3412 * Allocate and initialize the TID tables. Returns 0 on success.
3413 */
3414static int tid_init(struct tid_info *t)
3415{
3416 size_t size;
f2b7e78d 3417 unsigned int stid_bmap_size;
b8ff05a9 3418 unsigned int natids = t->natids;
b6f8eaec 3419 struct adapter *adap = container_of(t, struct adapter, tids);
b8ff05a9 3420
dca4faeb 3421 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
f2b7e78d
VP
3422 size = t->ntids * sizeof(*t->tid_tab) +
3423 natids * sizeof(*t->atid_tab) +
b8ff05a9 3424 t->nstids * sizeof(*t->stid_tab) +
dca4faeb 3425 t->nsftids * sizeof(*t->stid_tab) +
f2b7e78d 3426 stid_bmap_size * sizeof(long) +
dca4faeb
VP
3427 t->nftids * sizeof(*t->ftid_tab) +
3428 t->nsftids * sizeof(*t->ftid_tab);
f2b7e78d 3429
b8ff05a9
DM
3430 t->tid_tab = t4_alloc_mem(size);
3431 if (!t->tid_tab)
3432 return -ENOMEM;
3433
3434 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3435 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 3436 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
f2b7e78d 3437 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
b8ff05a9
DM
3438 spin_lock_init(&t->stid_lock);
3439 spin_lock_init(&t->atid_lock);
3440
3441 t->stids_in_use = 0;
3442 t->afree = NULL;
3443 t->atids_in_use = 0;
3444 atomic_set(&t->tids_in_use, 0);
3445
3446 /* Setup the free list for atid_tab and clear the stid bitmap. */
3447 if (natids) {
3448 while (--natids)
3449 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3450 t->afree = t->atid_tab;
3451 }
dca4faeb 3452 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
b6f8eaec
KS
3453 /* Reserve stid 0 for T4/T5 adapters */
3454 if (!t->stid_base &&
3455 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3456 __set_bit(0, t->stid_bmap);
3457
b8ff05a9
DM
3458 return 0;
3459}
3460
a3e3b285
AB
3461int cxgb4_clip_get(const struct net_device *dev,
3462 const struct in6_addr *lip)
01bcca68
VP
3463{
3464 struct adapter *adap;
3465 struct fw_clip_cmd c;
3466
3467 adap = netdev2adap(dev);
3468 memset(&c, 0, sizeof(c));
3469 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3470 FW_CMD_REQUEST | FW_CMD_WRITE);
3471 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
12f2a479
JP
3472 c.ip_hi = *(__be64 *)(lip->s6_addr);
3473 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
01bcca68
VP
3474 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3475}
a3e3b285 3476EXPORT_SYMBOL(cxgb4_clip_get);
01bcca68 3477
a3e3b285
AB
3478int cxgb4_clip_release(const struct net_device *dev,
3479 const struct in6_addr *lip)
01bcca68
VP
3480{
3481 struct adapter *adap;
3482 struct fw_clip_cmd c;
3483
3484 adap = netdev2adap(dev);
3485 memset(&c, 0, sizeof(c));
3486 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3487 FW_CMD_REQUEST | FW_CMD_READ);
3488 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
12f2a479
JP
3489 c.ip_hi = *(__be64 *)(lip->s6_addr);
3490 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
01bcca68
VP
3491 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3492}
a3e3b285 3493EXPORT_SYMBOL(cxgb4_clip_release);
01bcca68 3494
b8ff05a9
DM
3495/**
3496 * cxgb4_create_server - create an IP server
3497 * @dev: the device
3498 * @stid: the server TID
3499 * @sip: local IP address to bind server to
3500 * @sport: the server's TCP port
3501 * @queue: queue to direct messages from this server to
3502 *
3503 * Create an IP server for the given port and address.
3504 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3505 */
3506int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
793dad94
VP
3507 __be32 sip, __be16 sport, __be16 vlan,
3508 unsigned int queue)
b8ff05a9
DM
3509{
3510 unsigned int chan;
3511 struct sk_buff *skb;
3512 struct adapter *adap;
3513 struct cpl_pass_open_req *req;
80f40c1f 3514 int ret;
b8ff05a9
DM
3515
3516 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3517 if (!skb)
3518 return -ENOMEM;
3519
3520 adap = netdev2adap(dev);
3521 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3522 INIT_TP_WR(req, 0);
3523 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3524 req->local_port = sport;
3525 req->peer_port = htons(0);
3526 req->local_ip = sip;
3527 req->peer_ip = htonl(0);
e46dab4d 3528 chan = rxq_to_chan(&adap->sge, queue);
b8ff05a9
DM
3529 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3530 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3531 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
80f40c1f
VP
3532 ret = t4_mgmt_tx(adap, skb);
3533 return net_xmit_eval(ret);
b8ff05a9
DM
3534}
3535EXPORT_SYMBOL(cxgb4_create_server);
3536
80f40c1f
VP
3537/* cxgb4_create_server6 - create an IPv6 server
3538 * @dev: the device
3539 * @stid: the server TID
3540 * @sip: local IPv6 address to bind server to
3541 * @sport: the server's TCP port
3542 * @queue: queue to direct messages from this server to
3543 *
3544 * Create an IPv6 server for the given port and address.
3545 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3546 */
3547int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3548 const struct in6_addr *sip, __be16 sport,
3549 unsigned int queue)
3550{
3551 unsigned int chan;
3552 struct sk_buff *skb;
3553 struct adapter *adap;
3554 struct cpl_pass_open_req6 *req;
3555 int ret;
3556
3557 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3558 if (!skb)
3559 return -ENOMEM;
3560
3561 adap = netdev2adap(dev);
3562 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3563 INIT_TP_WR(req, 0);
3564 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3565 req->local_port = sport;
3566 req->peer_port = htons(0);
3567 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3568 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3569 req->peer_ip_hi = cpu_to_be64(0);
3570 req->peer_ip_lo = cpu_to_be64(0);
3571 chan = rxq_to_chan(&adap->sge, queue);
3572 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3573 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3574 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3575 ret = t4_mgmt_tx(adap, skb);
3576 return net_xmit_eval(ret);
3577}
3578EXPORT_SYMBOL(cxgb4_create_server6);
3579
3580int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3581 unsigned int queue, bool ipv6)
3582{
3583 struct sk_buff *skb;
3584 struct adapter *adap;
3585 struct cpl_close_listsvr_req *req;
3586 int ret;
3587
3588 adap = netdev2adap(dev);
3589
3590 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3591 if (!skb)
3592 return -ENOMEM;
3593
3594 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3595 INIT_TP_WR(req, 0);
3596 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3597 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3598 LISTSVR_IPV6(0)) | QUEUENO(queue));
3599 ret = t4_mgmt_tx(adap, skb);
3600 return net_xmit_eval(ret);
3601}
3602EXPORT_SYMBOL(cxgb4_remove_server);
3603
b8ff05a9
DM
3604/**
3605 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3606 * @mtus: the HW MTU table
3607 * @mtu: the target MTU
3608 * @idx: index of selected entry in the MTU table
3609 *
3610 * Returns the index and the value in the HW MTU table that is closest to
3611 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3612 * table, in which case that smallest available value is selected.
3613 */
3614unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3615 unsigned int *idx)
3616{
3617 unsigned int i = 0;
3618
3619 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3620 ++i;
3621 if (idx)
3622 *idx = i;
3623 return mtus[i];
3624}
3625EXPORT_SYMBOL(cxgb4_best_mtu);
3626
92e7ae71
HS
3627/**
3628 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3629 * @mtus: the HW MTU table
3630 * @header_size: Header Size
3631 * @data_size_max: maximum Data Segment Size
3632 * @data_size_align: desired Data Segment Size Alignment (2^N)
3633 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3634 *
3635 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3636 * MTU Table based solely on a Maximum MTU parameter, we break that
3637 * parameter up into a Header Size and Maximum Data Segment Size, and
3638 * provide a desired Data Segment Size Alignment. If we find an MTU in
3639 * the Hardware MTU Table which will result in a Data Segment Size with
3640 * the requested alignment _and_ that MTU isn't "too far" from the
3641 * closest MTU, then we'll return that rather than the closest MTU.
3642 */
3643unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3644 unsigned short header_size,
3645 unsigned short data_size_max,
3646 unsigned short data_size_align,
3647 unsigned int *mtu_idxp)
3648{
3649 unsigned short max_mtu = header_size + data_size_max;
3650 unsigned short data_size_align_mask = data_size_align - 1;
3651 int mtu_idx, aligned_mtu_idx;
3652
3653 /* Scan the MTU Table till we find an MTU which is larger than our
3654 * Maximum MTU or we reach the end of the table. Along the way,
3655 * record the last MTU found, if any, which will result in a Data
3656 * Segment Length matching the requested alignment.
3657 */
3658 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3659 unsigned short data_size = mtus[mtu_idx] - header_size;
3660
3661 /* If this MTU minus the Header Size would result in a
3662 * Data Segment Size of the desired alignment, remember it.
3663 */
3664 if ((data_size & data_size_align_mask) == 0)
3665 aligned_mtu_idx = mtu_idx;
3666
3667 /* If we're not at the end of the Hardware MTU Table and the
3668 * next element is larger than our Maximum MTU, drop out of
3669 * the loop.
3670 */
3671 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3672 break;
3673 }
3674
3675 /* If we fell out of the loop because we ran to the end of the table,
3676 * then we just have to use the last [largest] entry.
3677 */
3678 if (mtu_idx == NMTUS)
3679 mtu_idx--;
3680
3681 /* If we found an MTU which resulted in the requested Data Segment
3682 * Length alignment and that's "not far" from the largest MTU which is
3683 * less than or equal to the maximum MTU, then use that.
3684 */
3685 if (aligned_mtu_idx >= 0 &&
3686 mtu_idx - aligned_mtu_idx <= 1)
3687 mtu_idx = aligned_mtu_idx;
3688
3689 /* If the caller has passed in an MTU Index pointer, pass the
3690 * MTU Index back. Return the MTU value.
3691 */
3692 if (mtu_idxp)
3693 *mtu_idxp = mtu_idx;
3694 return mtus[mtu_idx];
3695}
3696EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3697
b8ff05a9
DM
3698/**
3699 * cxgb4_port_chan - get the HW channel of a port
3700 * @dev: the net device for the port
3701 *
3702 * Return the HW Tx channel of the given port.
3703 */
3704unsigned int cxgb4_port_chan(const struct net_device *dev)
3705{
3706 return netdev2pinfo(dev)->tx_chan;
3707}
3708EXPORT_SYMBOL(cxgb4_port_chan);
3709
881806bc
VP
3710unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3711{
3712 struct adapter *adap = netdev2adap(dev);
2cc301d2 3713 u32 v1, v2, lp_count, hp_count;
881806bc 3714
2cc301d2
SR
3715 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3716 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
d14807dd 3717 if (is_t4(adap->params.chip)) {
2cc301d2
SR
3718 lp_count = G_LP_COUNT(v1);
3719 hp_count = G_HP_COUNT(v1);
3720 } else {
3721 lp_count = G_LP_COUNT_T5(v1);
3722 hp_count = G_HP_COUNT_T5(v2);
3723 }
3724 return lpfifo ? lp_count : hp_count;
881806bc
VP
3725}
3726EXPORT_SYMBOL(cxgb4_dbfifo_count);
3727
b8ff05a9
DM
3728/**
3729 * cxgb4_port_viid - get the VI id of a port
3730 * @dev: the net device for the port
3731 *
3732 * Return the VI id of the given port.
3733 */
3734unsigned int cxgb4_port_viid(const struct net_device *dev)
3735{
3736 return netdev2pinfo(dev)->viid;
3737}
3738EXPORT_SYMBOL(cxgb4_port_viid);
3739
3740/**
3741 * cxgb4_port_idx - get the index of a port
3742 * @dev: the net device for the port
3743 *
3744 * Return the index of the given port.
3745 */
3746unsigned int cxgb4_port_idx(const struct net_device *dev)
3747{
3748 return netdev2pinfo(dev)->port_id;
3749}
3750EXPORT_SYMBOL(cxgb4_port_idx);
3751
b8ff05a9
DM
3752void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3753 struct tp_tcp_stats *v6)
3754{
3755 struct adapter *adap = pci_get_drvdata(pdev);
3756
3757 spin_lock(&adap->stats_lock);
3758 t4_tp_get_tcp_stats(adap, v4, v6);
3759 spin_unlock(&adap->stats_lock);
3760}
3761EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3762
3763void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3764 const unsigned int *pgsz_order)
3765{
3766 struct adapter *adap = netdev2adap(dev);
3767
3768 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3769 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3770 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3771 HPZ3(pgsz_order[3]));
3772}
3773EXPORT_SYMBOL(cxgb4_iscsi_init);
3774
3069ee9b
VP
3775int cxgb4_flush_eq_cache(struct net_device *dev)
3776{
3777 struct adapter *adap = netdev2adap(dev);
3778 int ret;
3779
3780 ret = t4_fwaddrspace_write(adap, adap->mbox,
3781 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3782 return ret;
3783}
3784EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3785
3786static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3787{
3788 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3789 __be64 indices;
3790 int ret;
3791
fc5ab020
HS
3792 spin_lock(&adap->win0_lock);
3793 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3794 sizeof(indices), (__be32 *)&indices,
3795 T4_MEMORY_READ);
3796 spin_unlock(&adap->win0_lock);
3069ee9b 3797 if (!ret) {
404d9e3f
VP
3798 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3799 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
3800 }
3801 return ret;
3802}
3803
3804int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3805 u16 size)
3806{
3807 struct adapter *adap = netdev2adap(dev);
3808 u16 hw_pidx, hw_cidx;
3809 int ret;
3810
3811 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3812 if (ret)
3813 goto out;
3814
3815 if (pidx != hw_pidx) {
3816 u16 delta;
3817
3818 if (pidx >= hw_pidx)
3819 delta = pidx - hw_pidx;
3820 else
3821 delta = size - hw_pidx + pidx;
3822 wmb();
840f3000
VP
3823 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3824 QID(qid) | PIDX(delta));
3069ee9b
VP
3825 }
3826out:
3827 return ret;
3828}
3829EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3830
3cbdb928
VP
3831void cxgb4_disable_db_coalescing(struct net_device *dev)
3832{
3833 struct adapter *adap;
3834
3835 adap = netdev2adap(dev);
3836 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3837 F_NOCOALESCE);
3838}
3839EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3840
3841void cxgb4_enable_db_coalescing(struct net_device *dev)
3842{
3843 struct adapter *adap;
3844
3845 adap = netdev2adap(dev);
3846 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3847}
3848EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3849
031cf476
HS
3850int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3851{
3852 struct adapter *adap;
3853 u32 offset, memtype, memaddr;
3854 u32 edc0_size, edc1_size, mc0_size, mc1_size;
3855 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3856 int ret;
3857
3858 adap = netdev2adap(dev);
3859
3860 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3861
3862 /* Figure out where the offset lands in the Memory Type/Address scheme.
3863 * This code assumes that the memory is laid out starting at offset 0
3864 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3865 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3866 * MC0, and some have both MC0 and MC1.
3867 */
3868 edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
3869 edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
3870 mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
3871
3872 edc0_end = edc0_size;
3873 edc1_end = edc0_end + edc1_size;
3874 mc0_end = edc1_end + mc0_size;
3875
3876 if (offset < edc0_end) {
3877 memtype = MEM_EDC0;
3878 memaddr = offset;
3879 } else if (offset < edc1_end) {
3880 memtype = MEM_EDC1;
3881 memaddr = offset - edc0_end;
3882 } else {
3883 if (offset < mc0_end) {
3884 memtype = MEM_MC0;
3885 memaddr = offset - edc1_end;
3886 } else if (is_t4(adap->params.chip)) {
3887 /* T4 only has a single memory channel */
3888 goto err;
3889 } else {
3890 mc1_size = EXT_MEM_SIZE_GET(
3891 t4_read_reg(adap,
3892 MA_EXT_MEMORY1_BAR)) << 20;
3893 mc1_end = mc0_end + mc1_size;
3894 if (offset < mc1_end) {
3895 memtype = MEM_MC1;
3896 memaddr = offset - mc0_end;
3897 } else {
3898 /* offset beyond the end of any memory */
3899 goto err;
3900 }
3901 }
3902 }
3903
3904 spin_lock(&adap->win0_lock);
3905 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3906 spin_unlock(&adap->win0_lock);
3907 return ret;
3908
3909err:
3910 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3911 stag, offset);
3912 return -EINVAL;
3913}
3914EXPORT_SYMBOL(cxgb4_read_tpte);
3915
7730b4c7
HS
3916u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3917{
3918 u32 hi, lo;
3919 struct adapter *adap;
3920
3921 adap = netdev2adap(dev);
3922 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3923 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3924
3925 return ((u64)hi << 32) | (u64)lo;
3926}
3927EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3928
b8ff05a9
DM
3929static struct pci_driver cxgb4_driver;
3930
3931static void check_neigh_update(struct neighbour *neigh)
3932{
3933 const struct device *parent;
3934 const struct net_device *netdev = neigh->dev;
3935
3936 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3937 netdev = vlan_dev_real_dev(netdev);
3938 parent = netdev->dev.parent;
3939 if (parent && parent->driver == &cxgb4_driver.driver)
3940 t4_l2t_update(dev_get_drvdata(parent), neigh);
3941}
3942
3943static int netevent_cb(struct notifier_block *nb, unsigned long event,
3944 void *data)
3945{
3946 switch (event) {
3947 case NETEVENT_NEIGH_UPDATE:
3948 check_neigh_update(data);
3949 break;
b8ff05a9
DM
3950 case NETEVENT_REDIRECT:
3951 default:
3952 break;
3953 }
3954 return 0;
3955}
3956
3957static bool netevent_registered;
3958static struct notifier_block cxgb4_netevent_nb = {
3959 .notifier_call = netevent_cb
3960};
3961
3069ee9b
VP
3962static void drain_db_fifo(struct adapter *adap, int usecs)
3963{
2cc301d2 3964 u32 v1, v2, lp_count, hp_count;
3069ee9b
VP
3965
3966 do {
2cc301d2
SR
3967 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3968 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
d14807dd 3969 if (is_t4(adap->params.chip)) {
2cc301d2
SR
3970 lp_count = G_LP_COUNT(v1);
3971 hp_count = G_HP_COUNT(v1);
3972 } else {
3973 lp_count = G_LP_COUNT_T5(v1);
3974 hp_count = G_HP_COUNT_T5(v2);
3975 }
3976
3977 if (lp_count == 0 && hp_count == 0)
3978 break;
3069ee9b
VP
3979 set_current_state(TASK_UNINTERRUPTIBLE);
3980 schedule_timeout(usecs_to_jiffies(usecs));
3069ee9b
VP
3981 } while (1);
3982}
3983
3984static void disable_txq_db(struct sge_txq *q)
3985{
05eb2389
SW
3986 unsigned long flags;
3987
3988 spin_lock_irqsave(&q->db_lock, flags);
3069ee9b 3989 q->db_disabled = 1;
05eb2389 3990 spin_unlock_irqrestore(&q->db_lock, flags);
3069ee9b
VP
3991}
3992
05eb2389 3993static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3069ee9b
VP
3994{
3995 spin_lock_irq(&q->db_lock);
05eb2389
SW
3996 if (q->db_pidx_inc) {
3997 /* Make sure that all writes to the TX descriptors
3998 * are committed before we tell HW about them.
3999 */
4000 wmb();
4001 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4002 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
4003 q->db_pidx_inc = 0;
4004 }
3069ee9b
VP
4005 q->db_disabled = 0;
4006 spin_unlock_irq(&q->db_lock);
4007}
4008
4009static void disable_dbs(struct adapter *adap)
4010{
4011 int i;
4012
4013 for_each_ethrxq(&adap->sge, i)
4014 disable_txq_db(&adap->sge.ethtxq[i].q);
4015 for_each_ofldrxq(&adap->sge, i)
4016 disable_txq_db(&adap->sge.ofldtxq[i].q);
4017 for_each_port(adap, i)
4018 disable_txq_db(&adap->sge.ctrlq[i].q);
4019}
4020
4021static void enable_dbs(struct adapter *adap)
4022{
4023 int i;
4024
4025 for_each_ethrxq(&adap->sge, i)
05eb2389 4026 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3069ee9b 4027 for_each_ofldrxq(&adap->sge, i)
05eb2389 4028 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3069ee9b 4029 for_each_port(adap, i)
05eb2389
SW
4030 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
4031}
4032
4033static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
4034{
4035 if (adap->uld_handle[CXGB4_ULD_RDMA])
4036 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
4037 cmd);
4038}
4039
4040static void process_db_full(struct work_struct *work)
4041{
4042 struct adapter *adap;
4043
4044 adap = container_of(work, struct adapter, db_full_task);
4045
4046 drain_db_fifo(adap, dbfifo_drain_delay);
4047 enable_dbs(adap);
4048 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4049 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4050 DBFIFO_HP_INT | DBFIFO_LP_INT,
4051 DBFIFO_HP_INT | DBFIFO_LP_INT);
3069ee9b
VP
4052}
4053
4054static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4055{
4056 u16 hw_pidx, hw_cidx;
4057 int ret;
4058
05eb2389 4059 spin_lock_irq(&q->db_lock);
3069ee9b
VP
4060 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4061 if (ret)
4062 goto out;
4063 if (q->db_pidx != hw_pidx) {
4064 u16 delta;
4065
4066 if (q->db_pidx >= hw_pidx)
4067 delta = q->db_pidx - hw_pidx;
4068 else
4069 delta = q->size - hw_pidx + q->db_pidx;
4070 wmb();
840f3000
VP
4071 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4072 QID(q->cntxt_id) | PIDX(delta));
3069ee9b
VP
4073 }
4074out:
4075 q->db_disabled = 0;
05eb2389
SW
4076 q->db_pidx_inc = 0;
4077 spin_unlock_irq(&q->db_lock);
3069ee9b
VP
4078 if (ret)
4079 CH_WARN(adap, "DB drop recovery failed.\n");
4080}
4081static void recover_all_queues(struct adapter *adap)
4082{
4083 int i;
4084
4085 for_each_ethrxq(&adap->sge, i)
4086 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4087 for_each_ofldrxq(&adap->sge, i)
4088 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4089 for_each_port(adap, i)
4090 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4091}
4092
881806bc
VP
4093static void process_db_drop(struct work_struct *work)
4094{
4095 struct adapter *adap;
881806bc 4096
3069ee9b 4097 adap = container_of(work, struct adapter, db_drop_task);
881806bc 4098
d14807dd 4099 if (is_t4(adap->params.chip)) {
05eb2389 4100 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 4101 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
05eb2389 4102 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 4103 recover_all_queues(adap);
05eb2389 4104 drain_db_fifo(adap, dbfifo_drain_delay);
2cc301d2 4105 enable_dbs(adap);
05eb2389 4106 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2cc301d2
SR
4107 } else {
4108 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4109 u16 qid = (dropped_db >> 15) & 0x1ffff;
4110 u16 pidx_inc = dropped_db & 0x1fff;
4111 unsigned int s_qpp;
4112 unsigned short udb_density;
4113 unsigned long qpshift;
4114 int page;
4115 u32 udb;
4116
4117 dev_warn(adap->pdev_dev,
4118 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4119 dropped_db, qid,
4120 (dropped_db >> 14) & 1,
4121 (dropped_db >> 13) & 1,
4122 pidx_inc);
4123
4124 drain_db_fifo(adap, 1);
4125
4126 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4127 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4128 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4129 qpshift = PAGE_SHIFT - ilog2(udb_density);
4130 udb = qid << qpshift;
4131 udb &= PAGE_MASK;
4132 page = udb / PAGE_SIZE;
4133 udb += (qid - (page * udb_density)) * 128;
4134
4135 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4136
4137 /* Re-enable BAR2 WC */
4138 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4139 }
4140
3069ee9b 4141 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
881806bc
VP
4142}
4143
4144void t4_db_full(struct adapter *adap)
4145{
d14807dd 4146 if (is_t4(adap->params.chip)) {
05eb2389
SW
4147 disable_dbs(adap);
4148 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2cc301d2
SR
4149 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4150 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
29aaee65 4151 queue_work(adap->workq, &adap->db_full_task);
2cc301d2 4152 }
881806bc
VP
4153}
4154
4155void t4_db_dropped(struct adapter *adap)
4156{
05eb2389
SW
4157 if (is_t4(adap->params.chip)) {
4158 disable_dbs(adap);
4159 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4160 }
29aaee65 4161 queue_work(adap->workq, &adap->db_drop_task);
881806bc
VP
4162}
4163
b8ff05a9
DM
4164static void uld_attach(struct adapter *adap, unsigned int uld)
4165{
4166 void *handle;
4167 struct cxgb4_lld_info lli;
dca4faeb 4168 unsigned short i;
b8ff05a9
DM
4169
4170 lli.pdev = adap->pdev;
35b1de55 4171 lli.pf = adap->fn;
b8ff05a9
DM
4172 lli.l2t = adap->l2t;
4173 lli.tids = &adap->tids;
4174 lli.ports = adap->port;
4175 lli.vr = &adap->vres;
4176 lli.mtus = adap->params.mtus;
4177 if (uld == CXGB4_ULD_RDMA) {
4178 lli.rxq_ids = adap->sge.rdma_rxq;
cf38be6d 4179 lli.ciq_ids = adap->sge.rdma_ciq;
b8ff05a9 4180 lli.nrxq = adap->sge.rdmaqs;
cf38be6d 4181 lli.nciq = adap->sge.rdmaciqs;
b8ff05a9
DM
4182 } else if (uld == CXGB4_ULD_ISCSI) {
4183 lli.rxq_ids = adap->sge.ofld_rxq;
4184 lli.nrxq = adap->sge.ofldqsets;
4185 }
4186 lli.ntxq = adap->sge.ofldqsets;
4187 lli.nchan = adap->params.nports;
4188 lli.nports = adap->params.nports;
4189 lli.wr_cred = adap->params.ofldq_wr_cred;
d14807dd 4190 lli.adapter_type = adap->params.chip;
b8ff05a9 4191 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
7730b4c7 4192 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
b8ff05a9 4193 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
4194 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4195 (adap->fn * 4));
b8ff05a9 4196 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
4197 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4198 (adap->fn * 4));
dcf7b6f5 4199 lli.filt_mode = adap->params.tp.vlan_pri_map;
dca4faeb
VP
4200 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4201 for (i = 0; i < NCHAN; i++)
4202 lli.tx_modq[i] = i;
b8ff05a9
DM
4203 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4204 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4205 lli.fw_vers = adap->params.fw_vers;
3069ee9b 4206 lli.dbfifo_int_thresh = dbfifo_int_thresh;
04e10e21
HS
4207 lli.sge_ingpadboundary = adap->sge.fl_align;
4208 lli.sge_egrstatuspagesize = adap->sge.stat_len;
dca4faeb
VP
4209 lli.sge_pktshift = adap->sge.pktshift;
4210 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4c2c5763
HS
4211 lli.max_ordird_qp = adap->params.max_ordird_qp;
4212 lli.max_ird_adapter = adap->params.max_ird_adapter;
1ac0f095 4213 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
b8ff05a9
DM
4214
4215 handle = ulds[uld].add(&lli);
4216 if (IS_ERR(handle)) {
4217 dev_warn(adap->pdev_dev,
4218 "could not attach to the %s driver, error %ld\n",
4219 uld_str[uld], PTR_ERR(handle));
4220 return;
4221 }
4222
4223 adap->uld_handle[uld] = handle;
4224
4225 if (!netevent_registered) {
4226 register_netevent_notifier(&cxgb4_netevent_nb);
4227 netevent_registered = true;
4228 }
e29f5dbc
DM
4229
4230 if (adap->flags & FULL_INIT_DONE)
4231 ulds[uld].state_change(handle, CXGB4_STATE_UP);
b8ff05a9
DM
4232}
4233
4234static void attach_ulds(struct adapter *adap)
4235{
4236 unsigned int i;
4237
01bcca68
VP
4238 spin_lock(&adap_rcu_lock);
4239 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4240 spin_unlock(&adap_rcu_lock);
4241
b8ff05a9
DM
4242 mutex_lock(&uld_mutex);
4243 list_add_tail(&adap->list_node, &adapter_list);
4244 for (i = 0; i < CXGB4_ULD_MAX; i++)
4245 if (ulds[i].add)
4246 uld_attach(adap, i);
4247 mutex_unlock(&uld_mutex);
4248}
4249
4250static void detach_ulds(struct adapter *adap)
4251{
4252 unsigned int i;
4253
4254 mutex_lock(&uld_mutex);
4255 list_del(&adap->list_node);
4256 for (i = 0; i < CXGB4_ULD_MAX; i++)
4257 if (adap->uld_handle[i]) {
4258 ulds[i].state_change(adap->uld_handle[i],
4259 CXGB4_STATE_DETACH);
4260 adap->uld_handle[i] = NULL;
4261 }
4262 if (netevent_registered && list_empty(&adapter_list)) {
4263 unregister_netevent_notifier(&cxgb4_netevent_nb);
4264 netevent_registered = false;
4265 }
4266 mutex_unlock(&uld_mutex);
01bcca68
VP
4267
4268 spin_lock(&adap_rcu_lock);
4269 list_del_rcu(&adap->rcu_node);
4270 spin_unlock(&adap_rcu_lock);
b8ff05a9
DM
4271}
4272
4273static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4274{
4275 unsigned int i;
4276
4277 mutex_lock(&uld_mutex);
4278 for (i = 0; i < CXGB4_ULD_MAX; i++)
4279 if (adap->uld_handle[i])
4280 ulds[i].state_change(adap->uld_handle[i], new_state);
4281 mutex_unlock(&uld_mutex);
4282}
4283
4284/**
4285 * cxgb4_register_uld - register an upper-layer driver
4286 * @type: the ULD type
4287 * @p: the ULD methods
4288 *
4289 * Registers an upper-layer driver with this driver and notifies the ULD
4290 * about any presently available devices that support its type. Returns
4291 * %-EBUSY if a ULD of the same type is already registered.
4292 */
4293int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4294{
4295 int ret = 0;
4296 struct adapter *adap;
4297
4298 if (type >= CXGB4_ULD_MAX)
4299 return -EINVAL;
4300 mutex_lock(&uld_mutex);
4301 if (ulds[type].add) {
4302 ret = -EBUSY;
4303 goto out;
4304 }
4305 ulds[type] = *p;
4306 list_for_each_entry(adap, &adapter_list, list_node)
4307 uld_attach(adap, type);
4308out: mutex_unlock(&uld_mutex);
4309 return ret;
4310}
4311EXPORT_SYMBOL(cxgb4_register_uld);
4312
4313/**
4314 * cxgb4_unregister_uld - unregister an upper-layer driver
4315 * @type: the ULD type
4316 *
4317 * Unregisters an existing upper-layer driver.
4318 */
4319int cxgb4_unregister_uld(enum cxgb4_uld type)
4320{
4321 struct adapter *adap;
4322
4323 if (type >= CXGB4_ULD_MAX)
4324 return -EINVAL;
4325 mutex_lock(&uld_mutex);
4326 list_for_each_entry(adap, &adapter_list, list_node)
4327 adap->uld_handle[type] = NULL;
4328 ulds[type].add = NULL;
4329 mutex_unlock(&uld_mutex);
4330 return 0;
4331}
4332EXPORT_SYMBOL(cxgb4_unregister_uld);
4333
01bcca68 4334/* Check if netdev on which event is occured belongs to us or not. Return
ee9a33b2
LR
4335 * success (true) if it belongs otherwise failure (false).
4336 * Called with rcu_read_lock() held.
01bcca68 4337 */
ee9a33b2 4338static bool cxgb4_netdev(const struct net_device *netdev)
01bcca68
VP
4339{
4340 struct adapter *adap;
4341 int i;
4342
01bcca68
VP
4343 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4344 for (i = 0; i < MAX_NPORTS; i++)
ee9a33b2
LR
4345 if (adap->port[i] == netdev)
4346 return true;
4347 return false;
01bcca68
VP
4348}
4349
4350static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4351 unsigned long event)
4352{
4353 int ret = NOTIFY_DONE;
4354
4355 rcu_read_lock();
4356 if (cxgb4_netdev(event_dev)) {
4357 switch (event) {
4358 case NETDEV_UP:
4359 ret = cxgb4_clip_get(event_dev,
4360 (const struct in6_addr *)ifa->addr.s6_addr);
4361 if (ret < 0) {
4362 rcu_read_unlock();
4363 return ret;
4364 }
4365 ret = NOTIFY_OK;
4366 break;
4367 case NETDEV_DOWN:
4368 cxgb4_clip_release(event_dev,
4369 (const struct in6_addr *)ifa->addr.s6_addr);
4370 ret = NOTIFY_OK;
4371 break;
4372 default:
4373 break;
4374 }
4375 }
4376 rcu_read_unlock();
4377 return ret;
4378}
4379
4380static int cxgb4_inet6addr_handler(struct notifier_block *this,
4381 unsigned long event, void *data)
4382{
4383 struct inet6_ifaddr *ifa = data;
4384 struct net_device *event_dev;
4385 int ret = NOTIFY_DONE;
01bcca68 4386 struct bonding *bond = netdev_priv(ifa->idev->dev);
9caff1e7 4387 struct list_head *iter;
01bcca68
VP
4388 struct slave *slave;
4389 struct pci_dev *first_pdev = NULL;
4390
4391 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4392 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4393 ret = clip_add(event_dev, ifa, event);
4394 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4395 /* It is possible that two different adapters are bonded in one
4396 * bond. We need to find such different adapters and add clip
4397 * in all of them only once.
4398 */
9caff1e7 4399 bond_for_each_slave(bond, slave, iter) {
01bcca68
VP
4400 if (!first_pdev) {
4401 ret = clip_add(slave->dev, ifa, event);
4402 /* If clip_add is success then only initialize
4403 * first_pdev since it means it is our device
4404 */
4405 if (ret == NOTIFY_OK)
4406 first_pdev = to_pci_dev(
4407 slave->dev->dev.parent);
4408 } else if (first_pdev !=
4409 to_pci_dev(slave->dev->dev.parent))
4410 ret = clip_add(slave->dev, ifa, event);
4411 }
01bcca68
VP
4412 } else
4413 ret = clip_add(ifa->idev->dev, ifa, event);
4414
4415 return ret;
4416}
4417
4418static struct notifier_block cxgb4_inet6addr_notifier = {
4419 .notifier_call = cxgb4_inet6addr_handler
4420};
4421
4422/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4423 * a physical device.
4424 * The physical device reference is needed to send the actul CLIP command.
4425 */
4426static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4427{
4428 struct inet6_dev *idev = NULL;
4429 struct inet6_ifaddr *ifa;
4430 int ret = 0;
4431
4432 idev = __in6_dev_get(root_dev);
4433 if (!idev)
4434 return ret;
4435
4436 read_lock_bh(&idev->lock);
4437 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4438 ret = cxgb4_clip_get(dev,
4439 (const struct in6_addr *)ifa->addr.s6_addr);
4440 if (ret < 0)
4441 break;
4442 }
4443 read_unlock_bh(&idev->lock);
4444
4445 return ret;
4446}
4447
4448static int update_root_dev_clip(struct net_device *dev)
4449{
4450 struct net_device *root_dev = NULL;
4451 int i, ret = 0;
4452
4453 /* First populate the real net device's IPv6 addresses */
4454 ret = update_dev_clip(dev, dev);
4455 if (ret)
4456 return ret;
4457
4458 /* Parse all bond and vlan devices layered on top of the physical dev */
4459 for (i = 0; i < VLAN_N_VID; i++) {
f06c7f9f 4460 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
01bcca68
VP
4461 if (!root_dev)
4462 continue;
4463
4464 ret = update_dev_clip(root_dev, dev);
4465 if (ret)
4466 break;
4467 }
4468 return ret;
4469}
4470
4471static void update_clip(const struct adapter *adap)
4472{
4473 int i;
4474 struct net_device *dev;
4475 int ret;
4476
4477 rcu_read_lock();
4478
4479 for (i = 0; i < MAX_NPORTS; i++) {
4480 dev = adap->port[i];
4481 ret = 0;
4482
4483 if (dev)
4484 ret = update_root_dev_clip(dev);
4485
4486 if (ret < 0)
4487 break;
4488 }
4489 rcu_read_unlock();
4490}
4491
b8ff05a9
DM
4492/**
4493 * cxgb_up - enable the adapter
4494 * @adap: adapter being enabled
4495 *
4496 * Called when the first port is enabled, this function performs the
4497 * actions necessary to make an adapter operational, such as completing
4498 * the initialization of HW modules, and enabling interrupts.
4499 *
4500 * Must be called with the rtnl lock held.
4501 */
4502static int cxgb_up(struct adapter *adap)
4503{
aaefae9b 4504 int err;
b8ff05a9 4505
aaefae9b
DM
4506 err = setup_sge_queues(adap);
4507 if (err)
4508 goto out;
4509 err = setup_rss(adap);
4510 if (err)
4511 goto freeq;
b8ff05a9
DM
4512
4513 if (adap->flags & USING_MSIX) {
aaefae9b 4514 name_msix_vecs(adap);
b8ff05a9
DM
4515 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4516 adap->msix_info[0].desc, adap);
4517 if (err)
4518 goto irq_err;
4519
4520 err = request_msix_queue_irqs(adap);
4521 if (err) {
4522 free_irq(adap->msix_info[0].vec, adap);
4523 goto irq_err;
4524 }
4525 } else {
4526 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4527 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
b1a3c2b6 4528 adap->port[0]->name, adap);
b8ff05a9
DM
4529 if (err)
4530 goto irq_err;
4531 }
4532 enable_rx(adap);
4533 t4_sge_start(adap);
4534 t4_intr_enable(adap);
aaefae9b 4535 adap->flags |= FULL_INIT_DONE;
b8ff05a9 4536 notify_ulds(adap, CXGB4_STATE_UP);
01bcca68 4537 update_clip(adap);
b8ff05a9
DM
4538 out:
4539 return err;
4540 irq_err:
4541 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
aaefae9b
DM
4542 freeq:
4543 t4_free_sge_resources(adap);
b8ff05a9
DM
4544 goto out;
4545}
4546
4547static void cxgb_down(struct adapter *adapter)
4548{
4549 t4_intr_disable(adapter);
4550 cancel_work_sync(&adapter->tid_release_task);
881806bc
VP
4551 cancel_work_sync(&adapter->db_full_task);
4552 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 4553 adapter->tid_release_task_busy = false;
204dc3c0 4554 adapter->tid_release_head = NULL;
b8ff05a9
DM
4555
4556 if (adapter->flags & USING_MSIX) {
4557 free_msix_queue_irqs(adapter);
4558 free_irq(adapter->msix_info[0].vec, adapter);
4559 } else
4560 free_irq(adapter->pdev->irq, adapter);
4561 quiesce_rx(adapter);
aaefae9b
DM
4562 t4_sge_stop(adapter);
4563 t4_free_sge_resources(adapter);
4564 adapter->flags &= ~FULL_INIT_DONE;
b8ff05a9
DM
4565}
4566
4567/*
4568 * net_device operations
4569 */
4570static int cxgb_open(struct net_device *dev)
4571{
4572 int err;
4573 struct port_info *pi = netdev_priv(dev);
4574 struct adapter *adapter = pi->adapter;
4575
6a3c869a
DM
4576 netif_carrier_off(dev);
4577
aaefae9b
DM
4578 if (!(adapter->flags & FULL_INIT_DONE)) {
4579 err = cxgb_up(adapter);
4580 if (err < 0)
4581 return err;
4582 }
b8ff05a9 4583
f68707b8
DM
4584 err = link_start(dev);
4585 if (!err)
4586 netif_tx_start_all_queues(dev);
4587 return err;
b8ff05a9
DM
4588}
4589
4590static int cxgb_close(struct net_device *dev)
4591{
b8ff05a9
DM
4592 struct port_info *pi = netdev_priv(dev);
4593 struct adapter *adapter = pi->adapter;
4594
4595 netif_tx_stop_all_queues(dev);
4596 netif_carrier_off(dev);
060e0c75 4597 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
b8ff05a9
DM
4598}
4599
f2b7e78d
VP
4600/* Return an error number if the indicated filter isn't writable ...
4601 */
4602static int writable_filter(struct filter_entry *f)
4603{
4604 if (f->locked)
4605 return -EPERM;
4606 if (f->pending)
4607 return -EBUSY;
4608
4609 return 0;
4610}
4611
4612/* Delete the filter at the specified index (if valid). The checks for all
4613 * the common problems with doing this like the filter being locked, currently
4614 * pending in another operation, etc.
4615 */
4616static int delete_filter(struct adapter *adapter, unsigned int fidx)
4617{
4618 struct filter_entry *f;
4619 int ret;
4620
dca4faeb 4621 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
f2b7e78d
VP
4622 return -EINVAL;
4623
4624 f = &adapter->tids.ftid_tab[fidx];
4625 ret = writable_filter(f);
4626 if (ret)
4627 return ret;
4628 if (f->valid)
4629 return del_filter_wr(adapter, fidx);
4630
4631 return 0;
4632}
4633
dca4faeb 4634int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
793dad94
VP
4635 __be32 sip, __be16 sport, __be16 vlan,
4636 unsigned int queue, unsigned char port, unsigned char mask)
dca4faeb
VP
4637{
4638 int ret;
4639 struct filter_entry *f;
4640 struct adapter *adap;
4641 int i;
4642 u8 *val;
4643
4644 adap = netdev2adap(dev);
4645
1cab775c 4646 /* Adjust stid to correct filter index */
470c60c4 4647 stid -= adap->tids.sftid_base;
1cab775c
VP
4648 stid += adap->tids.nftids;
4649
dca4faeb
VP
4650 /* Check to make sure the filter requested is writable ...
4651 */
4652 f = &adap->tids.ftid_tab[stid];
4653 ret = writable_filter(f);
4654 if (ret)
4655 return ret;
4656
4657 /* Clear out any old resources being used by the filter before
4658 * we start constructing the new filter.
4659 */
4660 if (f->valid)
4661 clear_filter(adap, f);
4662
4663 /* Clear out filter specifications */
4664 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4665 f->fs.val.lport = cpu_to_be16(sport);
4666 f->fs.mask.lport = ~0;
4667 val = (u8 *)&sip;
793dad94 4668 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
dca4faeb
VP
4669 for (i = 0; i < 4; i++) {
4670 f->fs.val.lip[i] = val[i];
4671 f->fs.mask.lip[i] = ~0;
4672 }
dcf7b6f5 4673 if (adap->params.tp.vlan_pri_map & F_PORT) {
793dad94
VP
4674 f->fs.val.iport = port;
4675 f->fs.mask.iport = mask;
4676 }
4677 }
dca4faeb 4678
dcf7b6f5 4679 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
7c89e555
KS
4680 f->fs.val.proto = IPPROTO_TCP;
4681 f->fs.mask.proto = ~0;
4682 }
4683
dca4faeb
VP
4684 f->fs.dirsteer = 1;
4685 f->fs.iq = queue;
4686 /* Mark filter as locked */
4687 f->locked = 1;
4688 f->fs.rpttid = 1;
4689
4690 ret = set_filter_wr(adap, stid);
4691 if (ret) {
4692 clear_filter(adap, f);
4693 return ret;
4694 }
4695
4696 return 0;
4697}
4698EXPORT_SYMBOL(cxgb4_create_server_filter);
4699
4700int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4701 unsigned int queue, bool ipv6)
4702{
4703 int ret;
4704 struct filter_entry *f;
4705 struct adapter *adap;
4706
4707 adap = netdev2adap(dev);
1cab775c
VP
4708
4709 /* Adjust stid to correct filter index */
470c60c4 4710 stid -= adap->tids.sftid_base;
1cab775c
VP
4711 stid += adap->tids.nftids;
4712
dca4faeb
VP
4713 f = &adap->tids.ftid_tab[stid];
4714 /* Unlock the filter */
4715 f->locked = 0;
4716
4717 ret = delete_filter(adap, stid);
4718 if (ret)
4719 return ret;
4720
4721 return 0;
4722}
4723EXPORT_SYMBOL(cxgb4_remove_server_filter);
4724
f5152c90
DM
4725static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4726 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
4727{
4728 struct port_stats stats;
4729 struct port_info *p = netdev_priv(dev);
4730 struct adapter *adapter = p->adapter;
b8ff05a9 4731
9fe6cb58
GS
4732 /* Block retrieving statistics during EEH error
4733 * recovery. Otherwise, the recovery might fail
4734 * and the PCI device will be removed permanently
4735 */
b8ff05a9 4736 spin_lock(&adapter->stats_lock);
9fe6cb58
GS
4737 if (!netif_device_present(dev)) {
4738 spin_unlock(&adapter->stats_lock);
4739 return ns;
4740 }
b8ff05a9
DM
4741 t4_get_port_stats(adapter, p->tx_chan, &stats);
4742 spin_unlock(&adapter->stats_lock);
4743
4744 ns->tx_bytes = stats.tx_octets;
4745 ns->tx_packets = stats.tx_frames;
4746 ns->rx_bytes = stats.rx_octets;
4747 ns->rx_packets = stats.rx_frames;
4748 ns->multicast = stats.rx_mcast_frames;
4749
4750 /* detailed rx_errors */
4751 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4752 stats.rx_runt;
4753 ns->rx_over_errors = 0;
4754 ns->rx_crc_errors = stats.rx_fcs_err;
4755 ns->rx_frame_errors = stats.rx_symbol_err;
4756 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4757 stats.rx_ovflow2 + stats.rx_ovflow3 +
4758 stats.rx_trunc0 + stats.rx_trunc1 +
4759 stats.rx_trunc2 + stats.rx_trunc3;
4760 ns->rx_missed_errors = 0;
4761
4762 /* detailed tx_errors */
4763 ns->tx_aborted_errors = 0;
4764 ns->tx_carrier_errors = 0;
4765 ns->tx_fifo_errors = 0;
4766 ns->tx_heartbeat_errors = 0;
4767 ns->tx_window_errors = 0;
4768
4769 ns->tx_errors = stats.tx_error_frames;
4770 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4771 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4772 return ns;
4773}
4774
4775static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4776{
060e0c75 4777 unsigned int mbox;
b8ff05a9
DM
4778 int ret = 0, prtad, devad;
4779 struct port_info *pi = netdev_priv(dev);
4780 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4781
4782 switch (cmd) {
4783 case SIOCGMIIPHY:
4784 if (pi->mdio_addr < 0)
4785 return -EOPNOTSUPP;
4786 data->phy_id = pi->mdio_addr;
4787 break;
4788 case SIOCGMIIREG:
4789 case SIOCSMIIREG:
4790 if (mdio_phy_id_is_c45(data->phy_id)) {
4791 prtad = mdio_phy_id_prtad(data->phy_id);
4792 devad = mdio_phy_id_devad(data->phy_id);
4793 } else if (data->phy_id < 32) {
4794 prtad = data->phy_id;
4795 devad = 0;
4796 data->reg_num &= 0x1f;
4797 } else
4798 return -EINVAL;
4799
060e0c75 4800 mbox = pi->adapter->fn;
b8ff05a9 4801 if (cmd == SIOCGMIIREG)
060e0c75 4802 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
4803 data->reg_num, &data->val_out);
4804 else
060e0c75 4805 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
4806 data->reg_num, data->val_in);
4807 break;
4808 default:
4809 return -EOPNOTSUPP;
4810 }
4811 return ret;
4812}
4813
4814static void cxgb_set_rxmode(struct net_device *dev)
4815{
4816 /* unfortunately we can't return errors to the stack */
4817 set_rxmode(dev, -1, false);
4818}
4819
4820static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4821{
4822 int ret;
4823 struct port_info *pi = netdev_priv(dev);
4824
4825 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4826 return -EINVAL;
060e0c75
DM
4827 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4828 -1, -1, -1, true);
b8ff05a9
DM
4829 if (!ret)
4830 dev->mtu = new_mtu;
4831 return ret;
4832}
4833
4834static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4835{
4836 int ret;
4837 struct sockaddr *addr = p;
4838 struct port_info *pi = netdev_priv(dev);
4839
4840 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 4841 return -EADDRNOTAVAIL;
b8ff05a9 4842
060e0c75
DM
4843 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4844 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
4845 if (ret < 0)
4846 return ret;
4847
4848 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4849 pi->xact_addr_filt = ret;
4850 return 0;
4851}
4852
b8ff05a9
DM
4853#ifdef CONFIG_NET_POLL_CONTROLLER
4854static void cxgb_netpoll(struct net_device *dev)
4855{
4856 struct port_info *pi = netdev_priv(dev);
4857 struct adapter *adap = pi->adapter;
4858
4859 if (adap->flags & USING_MSIX) {
4860 int i;
4861 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4862
4863 for (i = pi->nqsets; i; i--, rx++)
4864 t4_sge_intr_msix(0, &rx->rspq);
4865 } else
4866 t4_intr_handler(adap)(0, adap);
4867}
4868#endif
4869
4870static const struct net_device_ops cxgb4_netdev_ops = {
4871 .ndo_open = cxgb_open,
4872 .ndo_stop = cxgb_close,
4873 .ndo_start_xmit = t4_eth_xmit,
688848b1 4874 .ndo_select_queue = cxgb_select_queue,
9be793bf 4875 .ndo_get_stats64 = cxgb_get_stats,
b8ff05a9
DM
4876 .ndo_set_rx_mode = cxgb_set_rxmode,
4877 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 4878 .ndo_set_features = cxgb_set_features,
b8ff05a9
DM
4879 .ndo_validate_addr = eth_validate_addr,
4880 .ndo_do_ioctl = cxgb_ioctl,
4881 .ndo_change_mtu = cxgb_change_mtu,
b8ff05a9
DM
4882#ifdef CONFIG_NET_POLL_CONTROLLER
4883 .ndo_poll_controller = cxgb_netpoll,
4884#endif
4885};
4886
4887void t4_fatal_err(struct adapter *adap)
4888{
4889 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4890 t4_intr_disable(adap);
4891 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4892}
4893
0abfd152
HS
4894/* Return the specified PCI-E Configuration Space register from our Physical
4895 * Function. We try first via a Firmware LDST Command since we prefer to let
4896 * the firmware own all of these registers, but if that fails we go for it
4897 * directly ourselves.
4898 */
4899static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4900{
4901 struct fw_ldst_cmd ldst_cmd;
4902 u32 val;
4903 int ret;
4904
4905 /* Construct and send the Firmware LDST Command to retrieve the
4906 * specified PCI-E Configuration Space register.
4907 */
4908 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4909 ldst_cmd.op_to_addrspace =
4910 htonl(FW_CMD_OP(FW_LDST_CMD) |
4911 FW_CMD_REQUEST |
4912 FW_CMD_READ |
4913 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4914 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4915 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4916 ldst_cmd.u.pcie.ctrl_to_fn =
4917 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4918 ldst_cmd.u.pcie.r = reg;
4919 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4920 &ldst_cmd);
4921
4922 /* If the LDST Command suucceeded, exctract the returned register
4923 * value. Otherwise read it directly ourself.
4924 */
4925 if (ret == 0)
4926 val = ntohl(ldst_cmd.u.pcie.data[0]);
4927 else
4928 t4_hw_pci_read_cfg4(adap, reg, &val);
4929
4930 return val;
4931}
4932
b8ff05a9
DM
4933static void setup_memwin(struct adapter *adap)
4934{
0abfd152 4935 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
b8ff05a9 4936
d14807dd 4937 if (is_t4(adap->params.chip)) {
0abfd152
HS
4938 u32 bar0;
4939
4940 /* Truncation intentional: we only read the bottom 32-bits of
4941 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4942 * mechanism to read BAR0 instead of using
4943 * pci_resource_start() because we could be operating from
4944 * within a Virtual Machine which is trapping our accesses to
4945 * our Configuration Space and we need to set up the PCI-E
4946 * Memory Window decoders with the actual addresses which will
4947 * be coming across the PCI-E link.
4948 */
4949 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4950 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4951 adap->t4_bar0 = bar0;
4952
19dd37ba
SR
4953 mem_win0_base = bar0 + MEMWIN0_BASE;
4954 mem_win1_base = bar0 + MEMWIN1_BASE;
4955 mem_win2_base = bar0 + MEMWIN2_BASE;
0abfd152 4956 mem_win2_aperture = MEMWIN2_APERTURE;
19dd37ba
SR
4957 } else {
4958 /* For T5, only relative offset inside the PCIe BAR is passed */
4959 mem_win0_base = MEMWIN0_BASE;
0abfd152 4960 mem_win1_base = MEMWIN1_BASE;
19dd37ba 4961 mem_win2_base = MEMWIN2_BASE_T5;
0abfd152 4962 mem_win2_aperture = MEMWIN2_APERTURE_T5;
19dd37ba 4963 }
b8ff05a9 4964 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
19dd37ba 4965 mem_win0_base | BIR(0) |
b8ff05a9
DM
4966 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4967 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
19dd37ba 4968 mem_win1_base | BIR(0) |
b8ff05a9
DM
4969 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4970 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
19dd37ba 4971 mem_win2_base | BIR(0) |
0abfd152
HS
4972 WINDOW(ilog2(mem_win2_aperture) - 10));
4973 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
636f9d37
VP
4974}
4975
4976static void setup_memwin_rdma(struct adapter *adap)
4977{
1ae970e0 4978 if (adap->vres.ocq.size) {
0abfd152
HS
4979 u32 start;
4980 unsigned int sz_kb;
1ae970e0 4981
0abfd152
HS
4982 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4983 start &= PCI_BASE_ADDRESS_MEM_MASK;
4984 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
1ae970e0
DM
4985 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4986 t4_write_reg(adap,
4987 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4988 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4989 t4_write_reg(adap,
4990 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4991 adap->vres.ocq.start);
4992 t4_read_reg(adap,
4993 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4994 }
b8ff05a9
DM
4995}
4996
02b5fb8e
DM
4997static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4998{
4999 u32 v;
5000 int ret;
5001
5002 /* get device capabilities */
5003 memset(c, 0, sizeof(*c));
5004 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5005 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 5006 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
060e0c75 5007 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
02b5fb8e
DM
5008 if (ret < 0)
5009 return ret;
5010
5011 /* select capabilities we'll be using */
5012 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5013 if (!vf_acls)
5014 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5015 else
5016 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5017 } else if (vf_acls) {
5018 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
5019 return ret;
5020 }
5021 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5022 FW_CMD_REQUEST | FW_CMD_WRITE);
060e0c75 5023 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
02b5fb8e
DM
5024 if (ret < 0)
5025 return ret;
5026
060e0c75 5027 ret = t4_config_glbl_rss(adap, adap->fn,
02b5fb8e
DM
5028 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5029 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5030 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
5031 if (ret < 0)
5032 return ret;
5033
060e0c75
DM
5034 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
5035 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
02b5fb8e
DM
5036 if (ret < 0)
5037 return ret;
5038
5039 t4_sge_init(adap);
5040
02b5fb8e
DM
5041 /* tweak some settings */
5042 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5043 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5044 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5045 v = t4_read_reg(adap, TP_PIO_DATA);
5046 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
060e0c75 5047
dca4faeb
VP
5048 /* first 4 Tx modulation queues point to consecutive Tx channels */
5049 adap->params.tp.tx_modq_map = 0xE4;
5050 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5051 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
5052
5053 /* associate each Tx modulation queue with consecutive Tx channels */
5054 v = 0x84218421;
5055 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5056 &v, 1, A_TP_TX_SCHED_HDR);
5057 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5058 &v, 1, A_TP_TX_SCHED_FIFO);
5059 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5060 &v, 1, A_TP_TX_SCHED_PCMD);
5061
5062#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
5063 if (is_offload(adap)) {
5064 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
5065 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5066 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5067 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5068 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5069 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
5070 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5071 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5072 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5073 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5074 }
5075
060e0c75
DM
5076 /* get basic stuff going */
5077 return t4_early_init(adap, adap->fn);
02b5fb8e
DM
5078}
5079
b8ff05a9
DM
5080/*
5081 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
5082 */
5083#define MAX_ATIDS 8192U
5084
636f9d37
VP
5085/*
5086 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5087 *
5088 * If the firmware we're dealing with has Configuration File support, then
5089 * we use that to perform all configuration
5090 */
5091
5092/*
5093 * Tweak configuration based on module parameters, etc. Most of these have
5094 * defaults assigned to them by Firmware Configuration Files (if we're using
5095 * them) but need to be explicitly set if we're using hard-coded
5096 * initialization. But even in the case of using Firmware Configuration
5097 * Files, we'd like to expose the ability to change these via module
5098 * parameters so these are essentially common tweaks/settings for
5099 * Configuration Files and hard-coded initialization ...
5100 */
5101static int adap_init0_tweaks(struct adapter *adapter)
5102{
5103 /*
5104 * Fix up various Host-Dependent Parameters like Page Size, Cache
5105 * Line Size, etc. The firmware default is for a 4KB Page Size and
5106 * 64B Cache Line Size ...
5107 */
5108 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
5109
5110 /*
5111 * Process module parameters which affect early initialization.
5112 */
5113 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5114 dev_err(&adapter->pdev->dev,
5115 "Ignoring illegal rx_dma_offset=%d, using 2\n",
5116 rx_dma_offset);
5117 rx_dma_offset = 2;
5118 }
5119 t4_set_reg_field(adapter, SGE_CONTROL,
5120 PKTSHIFT_MASK,
5121 PKTSHIFT(rx_dma_offset));
5122
5123 /*
5124 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5125 * adds the pseudo header itself.
5126 */
5127 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5128 CSUM_HAS_PSEUDO_HDR, 0);
5129
5130 return 0;
5131}
5132
5133/*
5134 * Attempt to initialize the adapter via a Firmware Configuration File.
5135 */
5136static int adap_init0_config(struct adapter *adapter, int reset)
5137{
5138 struct fw_caps_config_cmd caps_cmd;
5139 const struct firmware *cf;
5140 unsigned long mtype = 0, maddr = 0;
5141 u32 finiver, finicsum, cfcsum;
16e47624
HS
5142 int ret;
5143 int config_issued = 0;
0a57a536 5144 char *fw_config_file, fw_config_file_path[256];
16e47624 5145 char *config_name = NULL;
636f9d37
VP
5146
5147 /*
5148 * Reset device if necessary.
5149 */
5150 if (reset) {
5151 ret = t4_fw_reset(adapter, adapter->mbox,
5152 PIORSTMODE | PIORST);
5153 if (ret < 0)
5154 goto bye;
5155 }
5156
5157 /*
5158 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5159 * then use that. Otherwise, use the configuration file stored
5160 * in the adapter flash ...
5161 */
d14807dd 5162 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
0a57a536 5163 case CHELSIO_T4:
16e47624 5164 fw_config_file = FW4_CFNAME;
0a57a536
SR
5165 break;
5166 case CHELSIO_T5:
5167 fw_config_file = FW5_CFNAME;
5168 break;
5169 default:
5170 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5171 adapter->pdev->device);
5172 ret = -EINVAL;
5173 goto bye;
5174 }
5175
5176 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
636f9d37 5177 if (ret < 0) {
16e47624 5178 config_name = "On FLASH";
636f9d37
VP
5179 mtype = FW_MEMTYPE_CF_FLASH;
5180 maddr = t4_flash_cfg_addr(adapter);
5181 } else {
5182 u32 params[7], val[7];
5183
16e47624
HS
5184 sprintf(fw_config_file_path,
5185 "/lib/firmware/%s", fw_config_file);
5186 config_name = fw_config_file_path;
5187
636f9d37
VP
5188 if (cf->size >= FLASH_CFG_MAX_SIZE)
5189 ret = -ENOMEM;
5190 else {
5191 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5192 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5193 ret = t4_query_params(adapter, adapter->mbox,
5194 adapter->fn, 0, 1, params, val);
5195 if (ret == 0) {
5196 /*
fc5ab020 5197 * For t4_memory_rw() below addresses and
636f9d37
VP
5198 * sizes have to be in terms of multiples of 4
5199 * bytes. So, if the Configuration File isn't
5200 * a multiple of 4 bytes in length we'll have
5201 * to write that out separately since we can't
5202 * guarantee that the bytes following the
5203 * residual byte in the buffer returned by
5204 * request_firmware() are zeroed out ...
5205 */
5206 size_t resid = cf->size & 0x3;
5207 size_t size = cf->size & ~0x3;
5208 __be32 *data = (__be32 *)cf->data;
5209
5210 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
5211 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
5212
fc5ab020
HS
5213 spin_lock(&adapter->win0_lock);
5214 ret = t4_memory_rw(adapter, 0, mtype, maddr,
5215 size, data, T4_MEMORY_WRITE);
636f9d37
VP
5216 if (ret == 0 && resid != 0) {
5217 union {
5218 __be32 word;
5219 char buf[4];
5220 } last;
5221 int i;
5222
5223 last.word = data[size >> 2];
5224 for (i = resid; i < 4; i++)
5225 last.buf[i] = 0;
fc5ab020
HS
5226 ret = t4_memory_rw(adapter, 0, mtype,
5227 maddr + size,
5228 4, &last.word,
5229 T4_MEMORY_WRITE);
636f9d37 5230 }
fc5ab020 5231 spin_unlock(&adapter->win0_lock);
636f9d37
VP
5232 }
5233 }
5234
5235 release_firmware(cf);
5236 if (ret)
5237 goto bye;
5238 }
5239
5240 /*
5241 * Issue a Capability Configuration command to the firmware to get it
5242 * to parse the Configuration File. We don't use t4_fw_config_file()
5243 * because we want the ability to modify various features after we've
5244 * processed the configuration file ...
5245 */
5246 memset(&caps_cmd, 0, sizeof(caps_cmd));
5247 caps_cmd.op_to_write =
5248 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5249 FW_CMD_REQUEST |
5250 FW_CMD_READ);
ce91a923 5251 caps_cmd.cfvalid_to_len16 =
636f9d37
VP
5252 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
5253 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
5254 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
5255 FW_LEN16(caps_cmd));
5256 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5257 &caps_cmd);
16e47624
HS
5258
5259 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5260 * Configuration File in FLASH), our last gasp effort is to use the
5261 * Firmware Configuration File which is embedded in the firmware. A
5262 * very few early versions of the firmware didn't have one embedded
5263 * but we can ignore those.
5264 */
5265 if (ret == -ENOENT) {
5266 memset(&caps_cmd, 0, sizeof(caps_cmd));
5267 caps_cmd.op_to_write =
5268 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5269 FW_CMD_REQUEST |
5270 FW_CMD_READ);
5271 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5272 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5273 sizeof(caps_cmd), &caps_cmd);
5274 config_name = "Firmware Default";
5275 }
5276
5277 config_issued = 1;
636f9d37
VP
5278 if (ret < 0)
5279 goto bye;
5280
5281 finiver = ntohl(caps_cmd.finiver);
5282 finicsum = ntohl(caps_cmd.finicsum);
5283 cfcsum = ntohl(caps_cmd.cfcsum);
5284 if (finicsum != cfcsum)
5285 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5286 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5287 finicsum, cfcsum);
5288
636f9d37
VP
5289 /*
5290 * And now tell the firmware to use the configuration we just loaded.
5291 */
5292 caps_cmd.op_to_write =
5293 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5294 FW_CMD_REQUEST |
5295 FW_CMD_WRITE);
ce91a923 5296 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
5297 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5298 NULL);
5299 if (ret < 0)
5300 goto bye;
5301
5302 /*
5303 * Tweak configuration based on system architecture, module
5304 * parameters, etc.
5305 */
5306 ret = adap_init0_tweaks(adapter);
5307 if (ret < 0)
5308 goto bye;
5309
5310 /*
5311 * And finally tell the firmware to initialize itself using the
5312 * parameters from the Configuration File.
5313 */
5314 ret = t4_fw_initialize(adapter, adapter->mbox);
5315 if (ret < 0)
5316 goto bye;
5317
5318 /*
5319 * Return successfully and note that we're operating with parameters
5320 * not supplied by the driver, rather than from hard-wired
5321 * initialization constants burried in the driver.
5322 */
5323 adapter->flags |= USING_SOFT_PARAMS;
5324 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
16e47624
HS
5325 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5326 config_name, finiver, cfcsum);
636f9d37
VP
5327 return 0;
5328
5329 /*
5330 * Something bad happened. Return the error ... (If the "error"
5331 * is that there's no Configuration File on the adapter we don't
5332 * want to issue a warning since this is fairly common.)
5333 */
5334bye:
16e47624
HS
5335 if (config_issued && ret != -ENOENT)
5336 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5337 config_name, -ret);
636f9d37
VP
5338 return ret;
5339}
5340
13ee15d3
VP
5341/*
5342 * Attempt to initialize the adapter via hard-coded, driver supplied
5343 * parameters ...
5344 */
5345static int adap_init0_no_config(struct adapter *adapter, int reset)
5346{
5347 struct sge *s = &adapter->sge;
5348 struct fw_caps_config_cmd caps_cmd;
5349 u32 v;
5350 int i, ret;
5351
5352 /*
5353 * Reset device if necessary
5354 */
5355 if (reset) {
5356 ret = t4_fw_reset(adapter, adapter->mbox,
5357 PIORSTMODE | PIORST);
5358 if (ret < 0)
5359 goto bye;
5360 }
5361
5362 /*
5363 * Get device capabilities and select which we'll be using.
5364 */
5365 memset(&caps_cmd, 0, sizeof(caps_cmd));
5366 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5367 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 5368 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
13ee15d3
VP
5369 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5370 &caps_cmd);
5371 if (ret < 0)
5372 goto bye;
5373
13ee15d3
VP
5374 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5375 if (!vf_acls)
5376 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5377 else
5378 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5379 } else if (vf_acls) {
5380 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5381 goto bye;
5382 }
5383 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5384 FW_CMD_REQUEST | FW_CMD_WRITE);
5385 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5386 NULL);
5387 if (ret < 0)
5388 goto bye;
5389
5390 /*
5391 * Tweak configuration based on system architecture, module
5392 * parameters, etc.
5393 */
5394 ret = adap_init0_tweaks(adapter);
5395 if (ret < 0)
5396 goto bye;
5397
5398 /*
5399 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5400 * mode which maps each Virtual Interface to its own section of
5401 * the RSS Table and we turn on all map and hash enables ...
5402 */
5403 adapter->flags |= RSS_TNLALLLOOKUP;
5404 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5405 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5406 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5407 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5408 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5409 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5410 if (ret < 0)
5411 goto bye;
5412
5413 /*
5414 * Set up our own fundamental resource provisioning ...
5415 */
5416 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5417 PFRES_NEQ, PFRES_NETHCTRL,
5418 PFRES_NIQFLINT, PFRES_NIQ,
5419 PFRES_TC, PFRES_NVI,
5420 FW_PFVF_CMD_CMASK_MASK,
5421 pfvfres_pmask(adapter, adapter->fn, 0),
5422 PFRES_NEXACTF,
5423 PFRES_R_CAPS, PFRES_WX_CAPS);
5424 if (ret < 0)
5425 goto bye;
5426
5427 /*
5428 * Perform low level SGE initialization. We need to do this before we
5429 * send the firmware the INITIALIZE command because that will cause
5430 * any other PF Drivers which are waiting for the Master
5431 * Initialization to proceed forward.
5432 */
5433 for (i = 0; i < SGE_NTIMERS - 1; i++)
5434 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5435 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5436 s->counter_val[0] = 1;
5437 for (i = 1; i < SGE_NCOUNTERS; i++)
5438 s->counter_val[i] = min(intr_cnt[i - 1],
5439 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5440 t4_sge_init(adapter);
5441
5442#ifdef CONFIG_PCI_IOV
5443 /*
5444 * Provision resource limits for Virtual Functions. We currently
5445 * grant them all the same static resource limits except for the Port
5446 * Access Rights Mask which we're assigning based on the PF. All of
5447 * the static provisioning stuff for both the PF and VF really needs
5448 * to be managed in a persistent manner for each device which the
5449 * firmware controls.
5450 */
5451 {
5452 int pf, vf;
5453
7d6727cf 5454 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
13ee15d3
VP
5455 if (num_vf[pf] <= 0)
5456 continue;
5457
5458 /* VF numbering starts at 1! */
5459 for (vf = 1; vf <= num_vf[pf]; vf++) {
5460 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5461 pf, vf,
5462 VFRES_NEQ, VFRES_NETHCTRL,
5463 VFRES_NIQFLINT, VFRES_NIQ,
5464 VFRES_TC, VFRES_NVI,
1f1e4958 5465 FW_PFVF_CMD_CMASK_MASK,
13ee15d3
VP
5466 pfvfres_pmask(
5467 adapter, pf, vf),
5468 VFRES_NEXACTF,
5469 VFRES_R_CAPS, VFRES_WX_CAPS);
5470 if (ret < 0)
5471 dev_warn(adapter->pdev_dev,
5472 "failed to "\
5473 "provision pf/vf=%d/%d; "
5474 "err=%d\n", pf, vf, ret);
5475 }
5476 }
5477 }
5478#endif
5479
5480 /*
5481 * Set up the default filter mode. Later we'll want to implement this
5482 * via a firmware command, etc. ... This needs to be done before the
5483 * firmare initialization command ... If the selected set of fields
5484 * isn't equal to the default value, we'll need to make sure that the
5485 * field selections will fit in the 36-bit budget.
5486 */
5487 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
404d9e3f 5488 int j, bits = 0;
13ee15d3 5489
404d9e3f
VP
5490 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5491 switch (tp_vlan_pri_map & (1 << j)) {
13ee15d3
VP
5492 case 0:
5493 /* compressed filter field not enabled */
5494 break;
5495 case FCOE_MASK:
5496 bits += 1;
5497 break;
5498 case PORT_MASK:
5499 bits += 3;
5500 break;
5501 case VNIC_ID_MASK:
5502 bits += 17;
5503 break;
5504 case VLAN_MASK:
5505 bits += 17;
5506 break;
5507 case TOS_MASK:
5508 bits += 8;
5509 break;
5510 case PROTOCOL_MASK:
5511 bits += 8;
5512 break;
5513 case ETHERTYPE_MASK:
5514 bits += 16;
5515 break;
5516 case MACMATCH_MASK:
5517 bits += 9;
5518 break;
5519 case MPSHITTYPE_MASK:
5520 bits += 3;
5521 break;
5522 case FRAGMENTATION_MASK:
5523 bits += 1;
5524 break;
5525 }
5526
5527 if (bits > 36) {
5528 dev_err(adapter->pdev_dev,
5529 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5530 " using %#x\n", tp_vlan_pri_map, bits,
5531 TP_VLAN_PRI_MAP_DEFAULT);
5532 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5533 }
5534 }
5535 v = tp_vlan_pri_map;
5536 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5537 &v, 1, TP_VLAN_PRI_MAP);
5538
5539 /*
5540 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
5541 * to support any of the compressed filter fields above. Newer
5542 * versions of the firmware do this automatically but it doesn't hurt
5543 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5544 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5545 * since the firmware automatically turns this on and off when we have
5546 * a non-zero number of filters active (since it does have a
5547 * performance impact).
5548 */
5549 if (tp_vlan_pri_map)
5550 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5551 FIVETUPLELOOKUP_MASK,
5552 FIVETUPLELOOKUP_MASK);
5553
5554 /*
5555 * Tweak some settings.
5556 */
5557 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5558 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5559 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5560 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5561
5562 /*
5563 * Get basic stuff going by issuing the Firmware Initialize command.
5564 * Note that this _must_ be after all PFVF commands ...
5565 */
5566 ret = t4_fw_initialize(adapter, adapter->mbox);
5567 if (ret < 0)
5568 goto bye;
5569
5570 /*
5571 * Return successfully!
5572 */
5573 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5574 "driver parameters\n");
5575 return 0;
5576
5577 /*
5578 * Something bad happened. Return the error ...
5579 */
5580bye:
5581 return ret;
5582}
5583
16e47624
HS
5584static struct fw_info fw_info_array[] = {
5585 {
5586 .chip = CHELSIO_T4,
5587 .fs_name = FW4_CFNAME,
5588 .fw_mod_name = FW4_FNAME,
5589 .fw_hdr = {
5590 .chip = FW_HDR_CHIP_T4,
5591 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5592 .intfver_nic = FW_INTFVER(T4, NIC),
5593 .intfver_vnic = FW_INTFVER(T4, VNIC),
5594 .intfver_ri = FW_INTFVER(T4, RI),
5595 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5596 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5597 },
5598 }, {
5599 .chip = CHELSIO_T5,
5600 .fs_name = FW5_CFNAME,
5601 .fw_mod_name = FW5_FNAME,
5602 .fw_hdr = {
5603 .chip = FW_HDR_CHIP_T5,
5604 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5605 .intfver_nic = FW_INTFVER(T5, NIC),
5606 .intfver_vnic = FW_INTFVER(T5, VNIC),
5607 .intfver_ri = FW_INTFVER(T5, RI),
5608 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5609 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5610 },
5611 }
5612};
5613
5614static struct fw_info *find_fw_info(int chip)
5615{
5616 int i;
5617
5618 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5619 if (fw_info_array[i].chip == chip)
5620 return &fw_info_array[i];
5621 }
5622 return NULL;
5623}
5624
b8ff05a9
DM
5625/*
5626 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5627 */
5628static int adap_init0(struct adapter *adap)
5629{
5630 int ret;
5631 u32 v, port_vec;
5632 enum dev_state state;
5633 u32 params[7], val[7];
9a4da2cd 5634 struct fw_caps_config_cmd caps_cmd;
dcf7b6f5 5635 int reset = 1;
b8ff05a9 5636
636f9d37
VP
5637 /*
5638 * Contact FW, advertising Master capability (and potentially forcing
5639 * ourselves as the Master PF if our module parameter force_init is
5640 * set).
5641 */
5642 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5643 force_init ? MASTER_MUST : MASTER_MAY,
5644 &state);
b8ff05a9
DM
5645 if (ret < 0) {
5646 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5647 ret);
5648 return ret;
5649 }
636f9d37
VP
5650 if (ret == adap->mbox)
5651 adap->flags |= MASTER_PF;
5652 if (force_init && state == DEV_STATE_INIT)
5653 state = DEV_STATE_UNINIT;
b8ff05a9 5654
636f9d37
VP
5655 /*
5656 * If we're the Master PF Driver and the device is uninitialized,
5657 * then let's consider upgrading the firmware ... (We always want
5658 * to check the firmware version number in order to A. get it for
5659 * later reporting and B. to warn if the currently loaded firmware
5660 * is excessively mismatched relative to the driver.)
5661 */
16e47624
HS
5662 t4_get_fw_version(adap, &adap->params.fw_vers);
5663 t4_get_tp_version(adap, &adap->params.tp_vers);
636f9d37 5664 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
16e47624
HS
5665 struct fw_info *fw_info;
5666 struct fw_hdr *card_fw;
5667 const struct firmware *fw;
5668 const u8 *fw_data = NULL;
5669 unsigned int fw_size = 0;
5670
5671 /* This is the firmware whose headers the driver was compiled
5672 * against
5673 */
5674 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5675 if (fw_info == NULL) {
5676 dev_err(adap->pdev_dev,
5677 "unable to get firmware info for chip %d.\n",
5678 CHELSIO_CHIP_VERSION(adap->params.chip));
5679 return -EINVAL;
636f9d37 5680 }
16e47624
HS
5681
5682 /* allocate memory to read the header of the firmware on the
5683 * card
5684 */
5685 card_fw = t4_alloc_mem(sizeof(*card_fw));
5686
5687 /* Get FW from from /lib/firmware/ */
5688 ret = request_firmware(&fw, fw_info->fw_mod_name,
5689 adap->pdev_dev);
5690 if (ret < 0) {
5691 dev_err(adap->pdev_dev,
5692 "unable to load firmware image %s, error %d\n",
5693 fw_info->fw_mod_name, ret);
5694 } else {
5695 fw_data = fw->data;
5696 fw_size = fw->size;
5697 }
5698
5699 /* upgrade FW logic */
5700 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5701 state, &reset);
5702
5703 /* Cleaning up */
5704 if (fw != NULL)
5705 release_firmware(fw);
5706 t4_free_mem(card_fw);
5707
636f9d37 5708 if (ret < 0)
16e47624 5709 goto bye;
636f9d37 5710 }
b8ff05a9 5711
636f9d37
VP
5712 /*
5713 * Grab VPD parameters. This should be done after we establish a
5714 * connection to the firmware since some of the VPD parameters
5715 * (notably the Core Clock frequency) are retrieved via requests to
5716 * the firmware. On the other hand, we need these fairly early on
5717 * so we do this right after getting ahold of the firmware.
5718 */
5719 ret = get_vpd_params(adap, &adap->params.vpd);
a0881cab
DM
5720 if (ret < 0)
5721 goto bye;
a0881cab 5722
636f9d37 5723 /*
13ee15d3
VP
5724 * Find out what ports are available to us. Note that we need to do
5725 * this before calling adap_init0_no_config() since it needs nports
5726 * and portvec ...
636f9d37
VP
5727 */
5728 v =
5729 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5730 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5731 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
a0881cab
DM
5732 if (ret < 0)
5733 goto bye;
5734
636f9d37
VP
5735 adap->params.nports = hweight32(port_vec);
5736 adap->params.portvec = port_vec;
5737
5738 /*
5739 * If the firmware is initialized already (and we're not forcing a
5740 * master initialization), note that we're living with existing
5741 * adapter parameters. Otherwise, it's time to try initializing the
5742 * adapter ...
5743 */
5744 if (state == DEV_STATE_INIT) {
5745 dev_info(adap->pdev_dev, "Coming up as %s: "\
5746 "Adapter already initialized\n",
5747 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5748 adap->flags |= USING_SOFT_PARAMS;
5749 } else {
5750 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5751 "Initializing adapter\n");
636f9d37
VP
5752
5753 /*
5754 * If the firmware doesn't support Configuration
5755 * Files warn user and exit,
5756 */
5757 if (ret < 0)
13ee15d3 5758 dev_warn(adap->pdev_dev, "Firmware doesn't support "
636f9d37 5759 "configuration file.\n");
13ee15d3
VP
5760 if (force_old_init)
5761 ret = adap_init0_no_config(adap, reset);
636f9d37
VP
5762 else {
5763 /*
13ee15d3
VP
5764 * Find out whether we're dealing with a version of
5765 * the firmware which has configuration file support.
636f9d37 5766 */
13ee15d3
VP
5767 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5768 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5769 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5770 params, val);
636f9d37 5771
13ee15d3
VP
5772 /*
5773 * If the firmware doesn't support Configuration
5774 * Files, use the old Driver-based, hard-wired
5775 * initialization. Otherwise, try using the
5776 * Configuration File support and fall back to the
5777 * Driver-based initialization if there's no
5778 * Configuration File found.
5779 */
5780 if (ret < 0)
5781 ret = adap_init0_no_config(adap, reset);
5782 else {
5783 /*
5784 * The firmware provides us with a memory
5785 * buffer where we can load a Configuration
5786 * File from the host if we want to override
5787 * the Configuration File in flash.
5788 */
5789
5790 ret = adap_init0_config(adap, reset);
5791 if (ret == -ENOENT) {
5792 dev_info(adap->pdev_dev,
5793 "No Configuration File present "
16e47624 5794 "on adapter. Using hard-wired "
13ee15d3
VP
5795 "configuration parameters.\n");
5796 ret = adap_init0_no_config(adap, reset);
5797 }
636f9d37
VP
5798 }
5799 }
5800 if (ret < 0) {
5801 dev_err(adap->pdev_dev,
5802 "could not initialize adapter, error %d\n",
5803 -ret);
5804 goto bye;
5805 }
5806 }
5807
5808 /*
5809 * If we're living with non-hard-coded parameters (either from a
5810 * Firmware Configuration File or values programmed by a different PF
5811 * Driver), give the SGE code a chance to pull in anything that it
5812 * needs ... Note that this must be called after we retrieve our VPD
5813 * parameters in order to know how to convert core ticks to seconds.
5814 */
5815 if (adap->flags & USING_SOFT_PARAMS) {
5816 ret = t4_sge_init(adap);
5817 if (ret < 0)
5818 goto bye;
5819 }
5820
9a4da2cd
VP
5821 if (is_bypass_device(adap->pdev->device))
5822 adap->params.bypass = 1;
5823
636f9d37
VP
5824 /*
5825 * Grab some of our basic fundamental operating parameters.
5826 */
5827#define FW_PARAM_DEV(param) \
5828 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5829 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5830
b8ff05a9 5831#define FW_PARAM_PFVF(param) \
636f9d37
VP
5832 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5833 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5834 FW_PARAMS_PARAM_Y(0) | \
5835 FW_PARAMS_PARAM_Z(0)
b8ff05a9 5836
636f9d37 5837 params[0] = FW_PARAM_PFVF(EQ_START);
b8ff05a9
DM
5838 params[1] = FW_PARAM_PFVF(L2T_START);
5839 params[2] = FW_PARAM_PFVF(L2T_END);
5840 params[3] = FW_PARAM_PFVF(FILTER_START);
5841 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 5842 params[5] = FW_PARAM_PFVF(IQFLINT_START);
636f9d37 5843 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
b8ff05a9
DM
5844 if (ret < 0)
5845 goto bye;
636f9d37
VP
5846 adap->sge.egr_start = val[0];
5847 adap->l2t_start = val[1];
5848 adap->l2t_end = val[2];
b8ff05a9
DM
5849 adap->tids.ftid_base = val[3];
5850 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 5851 adap->sge.ingr_start = val[5];
b8ff05a9 5852
636f9d37
VP
5853 /* query params related to active filter region */
5854 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5855 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5856 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5857 /* If Active filter size is set we enable establishing
5858 * offload connection through firmware work request
5859 */
5860 if ((val[0] != val[1]) && (ret >= 0)) {
5861 adap->flags |= FW_OFLD_CONN;
5862 adap->tids.aftid_base = val[0];
5863 adap->tids.aftid_end = val[1];
5864 }
5865
b407a4a9
VP
5866 /* If we're running on newer firmware, let it know that we're
5867 * prepared to deal with encapsulated CPL messages. Older
5868 * firmware won't understand this and we'll just get
5869 * unencapsulated messages ...
5870 */
5871 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5872 val[0] = 1;
5873 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5874
1ac0f095
KS
5875 /*
5876 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5877 * capability. Earlier versions of the firmware didn't have the
5878 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5879 * permission to use ULPTX MEMWRITE DSGL.
5880 */
5881 if (is_t4(adap->params.chip)) {
5882 adap->params.ulptx_memwrite_dsgl = false;
5883 } else {
5884 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5885 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5886 1, params, val);
5887 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5888 }
5889
636f9d37
VP
5890 /*
5891 * Get device capabilities so we can determine what resources we need
5892 * to manage.
5893 */
5894 memset(&caps_cmd, 0, sizeof(caps_cmd));
9a4da2cd 5895 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
13ee15d3 5896 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 5897 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
5898 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5899 &caps_cmd);
5900 if (ret < 0)
5901 goto bye;
5902
13ee15d3 5903 if (caps_cmd.ofldcaps) {
b8ff05a9
DM
5904 /* query offload-related parameters */
5905 params[0] = FW_PARAM_DEV(NTID);
5906 params[1] = FW_PARAM_PFVF(SERVER_START);
5907 params[2] = FW_PARAM_PFVF(SERVER_END);
5908 params[3] = FW_PARAM_PFVF(TDDP_START);
5909 params[4] = FW_PARAM_PFVF(TDDP_END);
5910 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
636f9d37
VP
5911 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5912 params, val);
b8ff05a9
DM
5913 if (ret < 0)
5914 goto bye;
5915 adap->tids.ntids = val[0];
5916 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5917 adap->tids.stid_base = val[1];
5918 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37
VP
5919 /*
5920 * Setup server filter region. Divide the availble filter
5921 * region into two parts. Regular filters get 1/3rd and server
5922 * filters get 2/3rd part. This is only enabled if workarond
5923 * path is enabled.
5924 * 1. For regular filters.
5925 * 2. Server filter: This are special filters which are used
5926 * to redirect SYN packets to offload queue.
5927 */
5928 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5929 adap->tids.sftid_base = adap->tids.ftid_base +
5930 DIV_ROUND_UP(adap->tids.nftids, 3);
5931 adap->tids.nsftids = adap->tids.nftids -
5932 DIV_ROUND_UP(adap->tids.nftids, 3);
5933 adap->tids.nftids = adap->tids.sftid_base -
5934 adap->tids.ftid_base;
5935 }
b8ff05a9
DM
5936 adap->vres.ddp.start = val[3];
5937 adap->vres.ddp.size = val[4] - val[3] + 1;
5938 adap->params.ofldq_wr_cred = val[5];
636f9d37 5939
b8ff05a9
DM
5940 adap->params.offload = 1;
5941 }
636f9d37 5942 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
5943 params[0] = FW_PARAM_PFVF(STAG_START);
5944 params[1] = FW_PARAM_PFVF(STAG_END);
5945 params[2] = FW_PARAM_PFVF(RQ_START);
5946 params[3] = FW_PARAM_PFVF(RQ_END);
5947 params[4] = FW_PARAM_PFVF(PBL_START);
5948 params[5] = FW_PARAM_PFVF(PBL_END);
636f9d37
VP
5949 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5950 params, val);
b8ff05a9
DM
5951 if (ret < 0)
5952 goto bye;
5953 adap->vres.stag.start = val[0];
5954 adap->vres.stag.size = val[1] - val[0] + 1;
5955 adap->vres.rq.start = val[2];
5956 adap->vres.rq.size = val[3] - val[2] + 1;
5957 adap->vres.pbl.start = val[4];
5958 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
5959
5960 params[0] = FW_PARAM_PFVF(SQRQ_START);
5961 params[1] = FW_PARAM_PFVF(SQRQ_END);
5962 params[2] = FW_PARAM_PFVF(CQ_START);
5963 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
5964 params[4] = FW_PARAM_PFVF(OCQ_START);
5965 params[5] = FW_PARAM_PFVF(OCQ_END);
5c937dd3
HS
5966 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5967 val);
a0881cab
DM
5968 if (ret < 0)
5969 goto bye;
5970 adap->vres.qp.start = val[0];
5971 adap->vres.qp.size = val[1] - val[0] + 1;
5972 adap->vres.cq.start = val[2];
5973 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
5974 adap->vres.ocq.start = val[4];
5975 adap->vres.ocq.size = val[5] - val[4] + 1;
4c2c5763
HS
5976
5977 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5978 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5c937dd3
HS
5979 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5980 val);
4c2c5763
HS
5981 if (ret < 0) {
5982 adap->params.max_ordird_qp = 8;
5983 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5984 ret = 0;
5985 } else {
5986 adap->params.max_ordird_qp = val[0];
5987 adap->params.max_ird_adapter = val[1];
5988 }
5989 dev_info(adap->pdev_dev,
5990 "max_ordird_qp %d max_ird_adapter %d\n",
5991 adap->params.max_ordird_qp,
5992 adap->params.max_ird_adapter);
b8ff05a9 5993 }
636f9d37 5994 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
5995 params[0] = FW_PARAM_PFVF(ISCSI_START);
5996 params[1] = FW_PARAM_PFVF(ISCSI_END);
636f9d37
VP
5997 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5998 params, val);
b8ff05a9
DM
5999 if (ret < 0)
6000 goto bye;
6001 adap->vres.iscsi.start = val[0];
6002 adap->vres.iscsi.size = val[1] - val[0] + 1;
6003 }
6004#undef FW_PARAM_PFVF
6005#undef FW_PARAM_DEV
6006
92e7ae71
HS
6007 /* The MTU/MSS Table is initialized by now, so load their values. If
6008 * we're initializing the adapter, then we'll make any modifications
6009 * we want to the MTU/MSS Table and also initialize the congestion
6010 * parameters.
636f9d37 6011 */
b8ff05a9 6012 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
92e7ae71
HS
6013 if (state != DEV_STATE_INIT) {
6014 int i;
6015
6016 /* The default MTU Table contains values 1492 and 1500.
6017 * However, for TCP, it's better to have two values which are
6018 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
6019 * This allows us to have a TCP Data Payload which is a
6020 * multiple of 8 regardless of what combination of TCP Options
6021 * are in use (always a multiple of 4 bytes) which is
6022 * important for performance reasons. For instance, if no
6023 * options are in use, then we have a 20-byte IP header and a
6024 * 20-byte TCP header. In this case, a 1500-byte MSS would
6025 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
6026 * which is not a multiple of 8. So using an MSS of 1488 in
6027 * this case results in a TCP Data Payload of 1448 bytes which
6028 * is a multiple of 8. On the other hand, if 12-byte TCP Time
6029 * Stamps have been negotiated, then an MTU of 1500 bytes
6030 * results in a TCP Data Payload of 1448 bytes which, as
6031 * above, is a multiple of 8 bytes ...
6032 */
6033 for (i = 0; i < NMTUS; i++)
6034 if (adap->params.mtus[i] == 1492) {
6035 adap->params.mtus[i] = 1488;
6036 break;
6037 }
7ee9ff94 6038
92e7ae71
HS
6039 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6040 adap->params.b_wnd);
6041 }
dcf7b6f5 6042 t4_init_tp_params(adap);
636f9d37 6043 adap->flags |= FW_OK;
b8ff05a9
DM
6044 return 0;
6045
6046 /*
636f9d37
VP
6047 * Something bad happened. If a command timed out or failed with EIO
6048 * FW does not operate within its spec or something catastrophic
6049 * happened to HW/FW, stop issuing commands.
b8ff05a9 6050 */
636f9d37
VP
6051bye:
6052 if (ret != -ETIMEDOUT && ret != -EIO)
6053 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
6054 return ret;
6055}
6056
204dc3c0
DM
6057/* EEH callbacks */
6058
6059static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
6060 pci_channel_state_t state)
6061{
6062 int i;
6063 struct adapter *adap = pci_get_drvdata(pdev);
6064
6065 if (!adap)
6066 goto out;
6067
6068 rtnl_lock();
6069 adap->flags &= ~FW_OK;
6070 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
9fe6cb58 6071 spin_lock(&adap->stats_lock);
204dc3c0
DM
6072 for_each_port(adap, i) {
6073 struct net_device *dev = adap->port[i];
6074
6075 netif_device_detach(dev);
6076 netif_carrier_off(dev);
6077 }
9fe6cb58 6078 spin_unlock(&adap->stats_lock);
204dc3c0
DM
6079 if (adap->flags & FULL_INIT_DONE)
6080 cxgb_down(adap);
6081 rtnl_unlock();
144be3d9
GS
6082 if ((adap->flags & DEV_ENABLED)) {
6083 pci_disable_device(pdev);
6084 adap->flags &= ~DEV_ENABLED;
6085 }
204dc3c0
DM
6086out: return state == pci_channel_io_perm_failure ?
6087 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
6088}
6089
6090static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
6091{
6092 int i, ret;
6093 struct fw_caps_config_cmd c;
6094 struct adapter *adap = pci_get_drvdata(pdev);
6095
6096 if (!adap) {
6097 pci_restore_state(pdev);
6098 pci_save_state(pdev);
6099 return PCI_ERS_RESULT_RECOVERED;
6100 }
6101
144be3d9
GS
6102 if (!(adap->flags & DEV_ENABLED)) {
6103 if (pci_enable_device(pdev)) {
6104 dev_err(&pdev->dev, "Cannot reenable PCI "
6105 "device after reset\n");
6106 return PCI_ERS_RESULT_DISCONNECT;
6107 }
6108 adap->flags |= DEV_ENABLED;
204dc3c0
DM
6109 }
6110
6111 pci_set_master(pdev);
6112 pci_restore_state(pdev);
6113 pci_save_state(pdev);
6114 pci_cleanup_aer_uncorrect_error_status(pdev);
6115
6116 if (t4_wait_dev_ready(adap) < 0)
6117 return PCI_ERS_RESULT_DISCONNECT;
777c2300 6118 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
204dc3c0
DM
6119 return PCI_ERS_RESULT_DISCONNECT;
6120 adap->flags |= FW_OK;
6121 if (adap_init1(adap, &c))
6122 return PCI_ERS_RESULT_DISCONNECT;
6123
6124 for_each_port(adap, i) {
6125 struct port_info *p = adap2pinfo(adap, i);
6126
060e0c75
DM
6127 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
6128 NULL, NULL);
204dc3c0
DM
6129 if (ret < 0)
6130 return PCI_ERS_RESULT_DISCONNECT;
6131 p->viid = ret;
6132 p->xact_addr_filt = -1;
6133 }
6134
6135 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6136 adap->params.b_wnd);
1ae970e0 6137 setup_memwin(adap);
204dc3c0
DM
6138 if (cxgb_up(adap))
6139 return PCI_ERS_RESULT_DISCONNECT;
6140 return PCI_ERS_RESULT_RECOVERED;
6141}
6142
6143static void eeh_resume(struct pci_dev *pdev)
6144{
6145 int i;
6146 struct adapter *adap = pci_get_drvdata(pdev);
6147
6148 if (!adap)
6149 return;
6150
6151 rtnl_lock();
6152 for_each_port(adap, i) {
6153 struct net_device *dev = adap->port[i];
6154
6155 if (netif_running(dev)) {
6156 link_start(dev);
6157 cxgb_set_rxmode(dev);
6158 }
6159 netif_device_attach(dev);
6160 }
6161 rtnl_unlock();
6162}
6163
3646f0e5 6164static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
6165 .error_detected = eeh_err_detected,
6166 .slot_reset = eeh_slot_reset,
6167 .resume = eeh_resume,
6168};
6169
57d8b764 6170static inline bool is_x_10g_port(const struct link_config *lc)
b8ff05a9 6171{
57d8b764
KS
6172 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6173 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
b8ff05a9
DM
6174}
6175
c887ad0e
HS
6176static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6177 unsigned int us, unsigned int cnt,
b8ff05a9
DM
6178 unsigned int size, unsigned int iqe_size)
6179{
c887ad0e
HS
6180 q->adap = adap;
6181 set_rspq_intr_params(q, us, cnt);
b8ff05a9
DM
6182 q->iqe_len = iqe_size;
6183 q->size = size;
6184}
6185
6186/*
6187 * Perform default configuration of DMA queues depending on the number and type
6188 * of ports we found and the number of available CPUs. Most settings can be
6189 * modified by the admin prior to actual use.
6190 */
91744948 6191static void cfg_queues(struct adapter *adap)
b8ff05a9
DM
6192{
6193 struct sge *s = &adap->sge;
688848b1
AB
6194 int i, n10g = 0, qidx = 0;
6195#ifndef CONFIG_CHELSIO_T4_DCB
6196 int q10g = 0;
6197#endif
cf38be6d 6198 int ciq_size;
b8ff05a9
DM
6199
6200 for_each_port(adap, i)
57d8b764 6201 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
688848b1
AB
6202#ifdef CONFIG_CHELSIO_T4_DCB
6203 /* For Data Center Bridging support we need to be able to support up
6204 * to 8 Traffic Priorities; each of which will be assigned to its
6205 * own TX Queue in order to prevent Head-Of-Line Blocking.
6206 */
6207 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6208 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6209 MAX_ETH_QSETS, adap->params.nports * 8);
6210 BUG_ON(1);
6211 }
b8ff05a9 6212
688848b1
AB
6213 for_each_port(adap, i) {
6214 struct port_info *pi = adap2pinfo(adap, i);
6215
6216 pi->first_qset = qidx;
6217 pi->nqsets = 8;
6218 qidx += pi->nqsets;
6219 }
6220#else /* !CONFIG_CHELSIO_T4_DCB */
b8ff05a9
DM
6221 /*
6222 * We default to 1 queue per non-10G port and up to # of cores queues
6223 * per 10G port.
6224 */
6225 if (n10g)
6226 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5952dde7
YM
6227 if (q10g > netif_get_num_default_rss_queues())
6228 q10g = netif_get_num_default_rss_queues();
b8ff05a9
DM
6229
6230 for_each_port(adap, i) {
6231 struct port_info *pi = adap2pinfo(adap, i);
6232
6233 pi->first_qset = qidx;
57d8b764 6234 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
b8ff05a9
DM
6235 qidx += pi->nqsets;
6236 }
688848b1 6237#endif /* !CONFIG_CHELSIO_T4_DCB */
b8ff05a9
DM
6238
6239 s->ethqsets = qidx;
6240 s->max_ethqsets = qidx; /* MSI-X may lower it later */
6241
6242 if (is_offload(adap)) {
6243 /*
6244 * For offload we use 1 queue/channel if all ports are up to 1G,
6245 * otherwise we divide all available queues amongst the channels
6246 * capped by the number of available cores.
6247 */
6248 if (n10g) {
6249 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6250 num_online_cpus());
6251 s->ofldqsets = roundup(i, adap->params.nports);
6252 } else
6253 s->ofldqsets = adap->params.nports;
6254 /* For RDMA one Rx queue per channel suffices */
6255 s->rdmaqs = adap->params.nports;
cf38be6d 6256 s->rdmaciqs = adap->params.nports;
b8ff05a9
DM
6257 }
6258
6259 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6260 struct sge_eth_rxq *r = &s->ethrxq[i];
6261
c887ad0e 6262 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
b8ff05a9
DM
6263 r->fl.size = 72;
6264 }
6265
6266 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6267 s->ethtxq[i].q.size = 1024;
6268
6269 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6270 s->ctrlq[i].q.size = 512;
6271
6272 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6273 s->ofldtxq[i].q.size = 1024;
6274
6275 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6276 struct sge_ofld_rxq *r = &s->ofldrxq[i];
6277
c887ad0e 6278 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
b8ff05a9
DM
6279 r->rspq.uld = CXGB4_ULD_ISCSI;
6280 r->fl.size = 72;
6281 }
6282
6283 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6284 struct sge_ofld_rxq *r = &s->rdmarxq[i];
6285
c887ad0e 6286 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
b8ff05a9
DM
6287 r->rspq.uld = CXGB4_ULD_RDMA;
6288 r->fl.size = 72;
6289 }
6290
cf38be6d
HS
6291 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6292 if (ciq_size > SGE_MAX_IQ_SIZE) {
6293 CH_WARN(adap, "CIQ size too small for available IQs\n");
6294 ciq_size = SGE_MAX_IQ_SIZE;
6295 }
6296
6297 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6298 struct sge_ofld_rxq *r = &s->rdmaciq[i];
6299
c887ad0e 6300 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
cf38be6d
HS
6301 r->rspq.uld = CXGB4_ULD_RDMA;
6302 }
6303
c887ad0e
HS
6304 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6305 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
b8ff05a9
DM
6306}
6307
6308/*
6309 * Reduce the number of Ethernet queues across all ports to at most n.
6310 * n provides at least one queue per port.
6311 */
91744948 6312static void reduce_ethqs(struct adapter *adap, int n)
b8ff05a9
DM
6313{
6314 int i;
6315 struct port_info *pi;
6316
6317 while (n < adap->sge.ethqsets)
6318 for_each_port(adap, i) {
6319 pi = adap2pinfo(adap, i);
6320 if (pi->nqsets > 1) {
6321 pi->nqsets--;
6322 adap->sge.ethqsets--;
6323 if (adap->sge.ethqsets <= n)
6324 break;
6325 }
6326 }
6327
6328 n = 0;
6329 for_each_port(adap, i) {
6330 pi = adap2pinfo(adap, i);
6331 pi->first_qset = n;
6332 n += pi->nqsets;
6333 }
6334}
6335
6336/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6337#define EXTRA_VECS 2
6338
91744948 6339static int enable_msix(struct adapter *adap)
b8ff05a9
DM
6340{
6341 int ofld_need = 0;
c32ad224 6342 int i, want, need;
b8ff05a9
DM
6343 struct sge *s = &adap->sge;
6344 unsigned int nchan = adap->params.nports;
6345 struct msix_entry entries[MAX_INGQ + 1];
6346
6347 for (i = 0; i < ARRAY_SIZE(entries); ++i)
6348 entries[i].entry = i;
6349
6350 want = s->max_ethqsets + EXTRA_VECS;
6351 if (is_offload(adap)) {
cf38be6d 6352 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
b8ff05a9 6353 /* need nchan for each possible ULD */
cf38be6d 6354 ofld_need = 3 * nchan;
b8ff05a9 6355 }
688848b1
AB
6356#ifdef CONFIG_CHELSIO_T4_DCB
6357 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6358 * each port.
6359 */
6360 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6361#else
b8ff05a9 6362 need = adap->params.nports + EXTRA_VECS + ofld_need;
688848b1 6363#endif
c32ad224
AG
6364 want = pci_enable_msix_range(adap->pdev, entries, need, want);
6365 if (want < 0)
6366 return want;
b8ff05a9 6367
c32ad224
AG
6368 /*
6369 * Distribute available vectors to the various queue groups.
6370 * Every group gets its minimum requirement and NIC gets top
6371 * priority for leftovers.
6372 */
6373 i = want - EXTRA_VECS - ofld_need;
6374 if (i < s->max_ethqsets) {
6375 s->max_ethqsets = i;
6376 if (i < s->ethqsets)
6377 reduce_ethqs(adap, i);
6378 }
6379 if (is_offload(adap)) {
6380 i = want - EXTRA_VECS - s->max_ethqsets;
6381 i -= ofld_need - nchan;
6382 s->ofldqsets = (i / nchan) * nchan; /* round down */
6383 }
6384 for (i = 0; i < want; ++i)
6385 adap->msix_info[i].vec = entries[i].vector;
6386
6387 return 0;
b8ff05a9
DM
6388}
6389
6390#undef EXTRA_VECS
6391
91744948 6392static int init_rss(struct adapter *adap)
671b0060
DM
6393{
6394 unsigned int i, j;
6395
6396 for_each_port(adap, i) {
6397 struct port_info *pi = adap2pinfo(adap, i);
6398
6399 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6400 if (!pi->rss)
6401 return -ENOMEM;
6402 for (j = 0; j < pi->rss_size; j++)
278bc429 6403 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
671b0060
DM
6404 }
6405 return 0;
6406}
6407
91744948 6408static void print_port_info(const struct net_device *dev)
b8ff05a9 6409{
b8ff05a9 6410 char buf[80];
118969ed 6411 char *bufp = buf;
f1a051b9 6412 const char *spd = "";
118969ed
DM
6413 const struct port_info *pi = netdev_priv(dev);
6414 const struct adapter *adap = pi->adapter;
f1a051b9
DM
6415
6416 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6417 spd = " 2.5 GT/s";
6418 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6419 spd = " 5 GT/s";
d2e752db
RD
6420 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6421 spd = " 8 GT/s";
b8ff05a9 6422
118969ed
DM
6423 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6424 bufp += sprintf(bufp, "100/");
6425 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6426 bufp += sprintf(bufp, "1000/");
6427 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6428 bufp += sprintf(bufp, "10G/");
72aca4bf
KS
6429 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6430 bufp += sprintf(bufp, "40G/");
118969ed
DM
6431 if (bufp != buf)
6432 --bufp;
72aca4bf 6433 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
118969ed
DM
6434
6435 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
0a57a536 6436 adap->params.vpd.id,
d14807dd 6437 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
118969ed
DM
6438 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6439 (adap->flags & USING_MSIX) ? " MSI-X" :
6440 (adap->flags & USING_MSI) ? " MSI" : "");
a94cd705
KS
6441 netdev_info(dev, "S/N: %s, P/N: %s\n",
6442 adap->params.vpd.sn, adap->params.vpd.pn);
b8ff05a9
DM
6443}
6444
91744948 6445static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
ef306b50 6446{
e5c8ae5f 6447 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
ef306b50
DM
6448}
6449
06546391
DM
6450/*
6451 * Free the following resources:
6452 * - memory used for tables
6453 * - MSI/MSI-X
6454 * - net devices
6455 * - resources FW is holding for us
6456 */
6457static void free_some_resources(struct adapter *adapter)
6458{
6459 unsigned int i;
6460
6461 t4_free_mem(adapter->l2t);
6462 t4_free_mem(adapter->tids.tid_tab);
6463 disable_msi(adapter);
6464
6465 for_each_port(adapter, i)
671b0060
DM
6466 if (adapter->port[i]) {
6467 kfree(adap2pinfo(adapter, i)->rss);
06546391 6468 free_netdev(adapter->port[i]);
671b0060 6469 }
06546391 6470 if (adapter->flags & FW_OK)
060e0c75 6471 t4_fw_bye(adapter, adapter->fn);
06546391
DM
6472}
6473
2ed28baa 6474#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 6475#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9 6476 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
22adfe0a 6477#define SEGMENT_SIZE 128
b8ff05a9 6478
1dd06ae8 6479static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
b8ff05a9 6480{
22adfe0a 6481 int func, i, err, s_qpp, qpp, num_seg;
b8ff05a9 6482 struct port_info *pi;
c8f44aff 6483 bool highdma = false;
b8ff05a9 6484 struct adapter *adapter = NULL;
d6ce2628 6485 void __iomem *regs;
b8ff05a9
DM
6486
6487 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6488
6489 err = pci_request_regions(pdev, KBUILD_MODNAME);
6490 if (err) {
6491 /* Just info, some other driver may have claimed the device. */
6492 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6493 return err;
6494 }
6495
b8ff05a9
DM
6496 err = pci_enable_device(pdev);
6497 if (err) {
6498 dev_err(&pdev->dev, "cannot enable PCI device\n");
6499 goto out_release_regions;
6500 }
6501
d6ce2628
HS
6502 regs = pci_ioremap_bar(pdev, 0);
6503 if (!regs) {
6504 dev_err(&pdev->dev, "cannot map device registers\n");
6505 err = -ENOMEM;
6506 goto out_disable_device;
6507 }
6508
6509 /* We control everything through one PF */
6510 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6511 if (func != ent->driver_data) {
6512 iounmap(regs);
6513 pci_disable_device(pdev);
6514 pci_save_state(pdev); /* to restore SR-IOV later */
6515 goto sriov;
6516 }
6517
b8ff05a9 6518 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 6519 highdma = true;
b8ff05a9
DM
6520 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6521 if (err) {
6522 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6523 "coherent allocations\n");
d6ce2628 6524 goto out_unmap_bar0;
b8ff05a9
DM
6525 }
6526 } else {
6527 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6528 if (err) {
6529 dev_err(&pdev->dev, "no usable DMA configuration\n");
d6ce2628 6530 goto out_unmap_bar0;
b8ff05a9
DM
6531 }
6532 }
6533
6534 pci_enable_pcie_error_reporting(pdev);
ef306b50 6535 enable_pcie_relaxed_ordering(pdev);
b8ff05a9
DM
6536 pci_set_master(pdev);
6537 pci_save_state(pdev);
6538
6539 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6540 if (!adapter) {
6541 err = -ENOMEM;
d6ce2628 6542 goto out_unmap_bar0;
b8ff05a9
DM
6543 }
6544
29aaee65
AB
6545 adapter->workq = create_singlethread_workqueue("cxgb4");
6546 if (!adapter->workq) {
6547 err = -ENOMEM;
6548 goto out_free_adapter;
6549 }
6550
144be3d9
GS
6551 /* PCI device has been enabled */
6552 adapter->flags |= DEV_ENABLED;
6553
d6ce2628 6554 adapter->regs = regs;
b8ff05a9
DM
6555 adapter->pdev = pdev;
6556 adapter->pdev_dev = &pdev->dev;
3069ee9b 6557 adapter->mbox = func;
060e0c75 6558 adapter->fn = func;
b8ff05a9
DM
6559 adapter->msg_enable = dflt_msg_enable;
6560 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6561
6562 spin_lock_init(&adapter->stats_lock);
6563 spin_lock_init(&adapter->tid_release_lock);
6564
6565 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
6566 INIT_WORK(&adapter->db_full_task, process_db_full);
6567 INIT_WORK(&adapter->db_drop_task, process_db_drop);
b8ff05a9
DM
6568
6569 err = t4_prep_adapter(adapter);
6570 if (err)
d6ce2628
HS
6571 goto out_free_adapter;
6572
22adfe0a 6573
d14807dd 6574 if (!is_t4(adapter->params.chip)) {
22adfe0a
SR
6575 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6576 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6577 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6578 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6579
6580 /* Each segment size is 128B. Write coalescing is enabled only
6581 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
6582 * queue is less no of segments that can be accommodated in
6583 * a page size.
6584 */
6585 if (qpp > num_seg) {
6586 dev_err(&pdev->dev,
6587 "Incorrect number of egress queues per page\n");
6588 err = -EINVAL;
d6ce2628 6589 goto out_free_adapter;
22adfe0a
SR
6590 }
6591 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6592 pci_resource_len(pdev, 2));
6593 if (!adapter->bar2) {
6594 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6595 err = -ENOMEM;
d6ce2628 6596 goto out_free_adapter;
22adfe0a
SR
6597 }
6598 }
6599
636f9d37 6600 setup_memwin(adapter);
b8ff05a9 6601 err = adap_init0(adapter);
636f9d37 6602 setup_memwin_rdma(adapter);
b8ff05a9
DM
6603 if (err)
6604 goto out_unmap_bar;
6605
6606 for_each_port(adapter, i) {
6607 struct net_device *netdev;
6608
6609 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6610 MAX_ETH_QSETS);
6611 if (!netdev) {
6612 err = -ENOMEM;
6613 goto out_free_dev;
6614 }
6615
6616 SET_NETDEV_DEV(netdev, &pdev->dev);
6617
6618 adapter->port[i] = netdev;
6619 pi = netdev_priv(netdev);
6620 pi->adapter = adapter;
6621 pi->xact_addr_filt = -1;
b8ff05a9 6622 pi->port_id = i;
b8ff05a9
DM
6623 netdev->irq = pdev->irq;
6624
2ed28baa
MM
6625 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6626 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6627 NETIF_F_RXCSUM | NETIF_F_RXHASH |
f646968f 6628 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
c8f44aff
MM
6629 if (highdma)
6630 netdev->hw_features |= NETIF_F_HIGHDMA;
6631 netdev->features |= netdev->hw_features;
b8ff05a9
DM
6632 netdev->vlan_features = netdev->features & VLAN_FEAT;
6633
01789349
JP
6634 netdev->priv_flags |= IFF_UNICAST_FLT;
6635
b8ff05a9 6636 netdev->netdev_ops = &cxgb4_netdev_ops;
688848b1
AB
6637#ifdef CONFIG_CHELSIO_T4_DCB
6638 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6639 cxgb4_dcb_state_init(netdev);
6640#endif
7ad24ea4 6641 netdev->ethtool_ops = &cxgb_ethtool_ops;
b8ff05a9
DM
6642 }
6643
6644 pci_set_drvdata(pdev, adapter);
6645
6646 if (adapter->flags & FW_OK) {
060e0c75 6647 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
6648 if (err)
6649 goto out_free_dev;
6650 }
6651
6652 /*
6653 * Configure queues and allocate tables now, they can be needed as
6654 * soon as the first register_netdev completes.
6655 */
6656 cfg_queues(adapter);
6657
6658 adapter->l2t = t4_init_l2t();
6659 if (!adapter->l2t) {
6660 /* We tolerate a lack of L2T, giving up some functionality */
6661 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6662 adapter->params.offload = 0;
6663 }
6664
6665 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6666 dev_warn(&pdev->dev, "could not allocate TID table, "
6667 "continuing\n");
6668 adapter->params.offload = 0;
6669 }
6670
f7cabcdd
DM
6671 /* See what interrupts we'll be using */
6672 if (msi > 1 && enable_msix(adapter) == 0)
6673 adapter->flags |= USING_MSIX;
6674 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6675 adapter->flags |= USING_MSI;
6676
671b0060
DM
6677 err = init_rss(adapter);
6678 if (err)
6679 goto out_free_dev;
6680
b8ff05a9
DM
6681 /*
6682 * The card is now ready to go. If any errors occur during device
6683 * registration we do not fail the whole card but rather proceed only
6684 * with the ports we manage to register successfully. However we must
6685 * register at least one net device.
6686 */
6687 for_each_port(adapter, i) {
a57cabe0
DM
6688 pi = adap2pinfo(adapter, i);
6689 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6690 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6691
b8ff05a9
DM
6692 err = register_netdev(adapter->port[i]);
6693 if (err)
b1a3c2b6 6694 break;
b1a3c2b6
DM
6695 adapter->chan_map[pi->tx_chan] = i;
6696 print_port_info(adapter->port[i]);
b8ff05a9 6697 }
b1a3c2b6 6698 if (i == 0) {
b8ff05a9
DM
6699 dev_err(&pdev->dev, "could not register any net devices\n");
6700 goto out_free_dev;
6701 }
b1a3c2b6
DM
6702 if (err) {
6703 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6704 err = 0;
6403eab1 6705 }
b8ff05a9
DM
6706
6707 if (cxgb4_debugfs_root) {
6708 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6709 cxgb4_debugfs_root);
6710 setup_debugfs(adapter);
6711 }
6712
6482aa7c
DLR
6713 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6714 pdev->needs_freset = 1;
6715
b8ff05a9
DM
6716 if (is_offload(adapter))
6717 attach_ulds(adapter);
6718
8e1e6059 6719sriov:
b8ff05a9 6720#ifdef CONFIG_PCI_IOV
7d6727cf 6721 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
b8ff05a9
DM
6722 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6723 dev_info(&pdev->dev,
6724 "instantiated %u virtual functions\n",
6725 num_vf[func]);
6726#endif
6727 return 0;
6728
6729 out_free_dev:
06546391 6730 free_some_resources(adapter);
b8ff05a9 6731 out_unmap_bar:
d14807dd 6732 if (!is_t4(adapter->params.chip))
22adfe0a 6733 iounmap(adapter->bar2);
b8ff05a9 6734 out_free_adapter:
29aaee65
AB
6735 if (adapter->workq)
6736 destroy_workqueue(adapter->workq);
6737
b8ff05a9 6738 kfree(adapter);
d6ce2628
HS
6739 out_unmap_bar0:
6740 iounmap(regs);
b8ff05a9
DM
6741 out_disable_device:
6742 pci_disable_pcie_error_reporting(pdev);
6743 pci_disable_device(pdev);
6744 out_release_regions:
6745 pci_release_regions(pdev);
b8ff05a9
DM
6746 return err;
6747}
6748
91744948 6749static void remove_one(struct pci_dev *pdev)
b8ff05a9
DM
6750{
6751 struct adapter *adapter = pci_get_drvdata(pdev);
6752
636f9d37 6753#ifdef CONFIG_PCI_IOV
b8ff05a9
DM
6754 pci_disable_sriov(pdev);
6755
636f9d37
VP
6756#endif
6757
b8ff05a9
DM
6758 if (adapter) {
6759 int i;
6760
29aaee65
AB
6761 /* Tear down per-adapter Work Queue first since it can contain
6762 * references to our adapter data structure.
6763 */
6764 destroy_workqueue(adapter->workq);
6765
b8ff05a9
DM
6766 if (is_offload(adapter))
6767 detach_ulds(adapter);
6768
6769 for_each_port(adapter, i)
8f3a7676 6770 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
b8ff05a9
DM
6771 unregister_netdev(adapter->port[i]);
6772
9f16dc2e 6773 debugfs_remove_recursive(adapter->debugfs_root);
b8ff05a9 6774
f2b7e78d
VP
6775 /* If we allocated filters, free up state associated with any
6776 * valid filters ...
6777 */
6778 if (adapter->tids.ftid_tab) {
6779 struct filter_entry *f = &adapter->tids.ftid_tab[0];
dca4faeb
VP
6780 for (i = 0; i < (adapter->tids.nftids +
6781 adapter->tids.nsftids); i++, f++)
f2b7e78d
VP
6782 if (f->valid)
6783 clear_filter(adapter, f);
6784 }
6785
aaefae9b
DM
6786 if (adapter->flags & FULL_INIT_DONE)
6787 cxgb_down(adapter);
b8ff05a9 6788
06546391 6789 free_some_resources(adapter);
b8ff05a9 6790 iounmap(adapter->regs);
d14807dd 6791 if (!is_t4(adapter->params.chip))
22adfe0a 6792 iounmap(adapter->bar2);
b8ff05a9 6793 pci_disable_pcie_error_reporting(pdev);
144be3d9
GS
6794 if ((adapter->flags & DEV_ENABLED)) {
6795 pci_disable_device(pdev);
6796 adapter->flags &= ~DEV_ENABLED;
6797 }
b8ff05a9 6798 pci_release_regions(pdev);
ee9a33b2 6799 synchronize_rcu();
8b662fe7 6800 kfree(adapter);
a069ec91 6801 } else
b8ff05a9
DM
6802 pci_release_regions(pdev);
6803}
6804
6805static struct pci_driver cxgb4_driver = {
6806 .name = KBUILD_MODNAME,
6807 .id_table = cxgb4_pci_tbl,
6808 .probe = init_one,
91744948 6809 .remove = remove_one,
687d705c 6810 .shutdown = remove_one,
204dc3c0 6811 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
6812};
6813
6814static int __init cxgb4_init_module(void)
6815{
6816 int ret;
6817
6818 /* Debugfs support is optional, just warn if this fails */
6819 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6820 if (!cxgb4_debugfs_root)
428ac43f 6821 pr_warn("could not create debugfs entry, continuing\n");
b8ff05a9
DM
6822
6823 ret = pci_register_driver(&cxgb4_driver);
29aaee65 6824 if (ret < 0)
b8ff05a9 6825 debugfs_remove(cxgb4_debugfs_root);
01bcca68
VP
6826
6827 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6828
b8ff05a9
DM
6829 return ret;
6830}
6831
6832static void __exit cxgb4_cleanup_module(void)
6833{
01bcca68 6834 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
b8ff05a9
DM
6835 pci_unregister_driver(&cxgb4_driver);
6836 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6837}
6838
6839module_init(cxgb4_init_module);
6840module_exit(cxgb4_cleanup_module);
This page took 0.823358 seconds and 5 git commands to generate.