cxgb4: Add LE hash collision bug fix path in LLD driver
[deliverable/linux.git] drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/bitmap.h>
38#include <linux/crc32.h>
39#include <linux/ctype.h>
40#include <linux/debugfs.h>
41#include <linux/err.h>
42#include <linux/etherdevice.h>
43#include <linux/firmware.h>
 44#include <linux/if.h>
45#include <linux/if_vlan.h>
46#include <linux/init.h>
47#include <linux/log2.h>
48#include <linux/mdio.h>
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/mutex.h>
52#include <linux/netdevice.h>
53#include <linux/pci.h>
54#include <linux/aer.h>
55#include <linux/rtnetlink.h>
56#include <linux/sched.h>
57#include <linux/seq_file.h>
58#include <linux/sockios.h>
59#include <linux/vmalloc.h>
60#include <linux/workqueue.h>
61#include <net/neighbour.h>
62#include <net/netevent.h>
63#include <asm/uaccess.h>
64
65#include "cxgb4.h"
66#include "t4_regs.h"
67#include "t4_msg.h"
68#include "t4fw_api.h"
69#include "l2t.h"
70
 71#define DRV_VERSION "1.3.0-ko"
72#define DRV_DESC "Chelsio T4 Network Driver"
73
74/*
75 * Max interrupt hold-off timer value in us. Queues fall back to this value
76 * under extreme memory pressure so it's largish to give the system time to
77 * recover.
78 */
79#define MAX_SGE_TIMERVAL 200U
80
 81enum {
82 /*
83 * Physical Function provisioning constants.
84 */
85 PFRES_NVI = 4, /* # of Virtual Interfaces */
86 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
87 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
88 */
89 PFRES_NEQ = 256, /* # of egress queues */
90 PFRES_NIQ = 0, /* # of ingress queues */
91 PFRES_TC = 0, /* PCI-E traffic class */
92 PFRES_NEXACTF = 128, /* # of exact MPS filters */
93
94 PFRES_R_CAPS = FW_CMD_CAP_PF,
95 PFRES_WX_CAPS = FW_CMD_CAP_PF,
96
97#ifdef CONFIG_PCI_IOV
98 /*
99 * Virtual Function provisioning constants. We need two extra Ingress
100 * Queues with Interrupt capability to serve as the VF's Firmware
101 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
102 * neither will have Free Lists associated with them). For each
103 * Ethernet/Control Egress Queue and for each Free List, we need an
104 * Egress Context.
105 */
106 VFRES_NPORTS = 1, /* # of "ports" per VF */
107 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
108
109 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
110 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
111 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
 112 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
 113 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
114 VFRES_TC = 0, /* PCI-E traffic class */
115 VFRES_NEXACTF = 16, /* # of exact MPS filters */
116
117 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
118 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
 119#endif
120};
121
122/*
123 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
124 * static and likely not to be useful in the long run. We really need to
125 * implement some form of persistent configuration which the firmware
126 * controls.
127 */
128static unsigned int pfvfres_pmask(struct adapter *adapter,
129 unsigned int pf, unsigned int vf)
130{
131 unsigned int portn, portvec;
132
133 /*
134 * Give PF's access to all of the ports.
135 */
136 if (vf == 0)
137 return FW_PFVF_CMD_PMASK_MASK;
138
139 /*
140 * For VFs, we'll assign them access to the ports based purely on the
141 * PF. We assign active ports in order, wrapping around if there are
142 * fewer active ports than PFs: e.g. active port[pf % nports].
143 * Unfortunately the adapter's port_info structs haven't been
144 * initialized yet so we have to compute this.
145 */
146 if (adapter->params.nports == 0)
147 return 0;
148
149 portn = pf % adapter->params.nports;
150 portvec = adapter->params.portvec;
151 for (;;) {
152 /*
153 * Isolate the lowest set bit in the port vector. If we're at
154 * the port number that we want, return that as the pmask.
155 * otherwise mask that bit out of the port vector and
156 * decrement our port number ...
157 */
158 unsigned int pmask = portvec ^ (portvec & (portvec-1));
159 if (portn == 0)
160 return pmask;
161 portn--;
162 portvec &= ~pmask;
163 }
164 /*NOTREACHED*/
165}
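/*
 * Illustrative note (not driver code): the expression
 * portvec ^ (portvec & (portvec - 1)) above isolates the lowest set bit
 * of the port vector.  For example, with portvec = 0xc (ports 2 and 3
 * active):
 *
 *	portvec - 1             = 0xb
 *	portvec & (portvec - 1) = 0x8	(lowest set bit cleared)
 *	portvec ^ 0x8           = 0x4	(only the lowest set bit left)
 *
 * So with two active ports, VFs under PF 0 get pmask 0x4 (port 2), VFs
 * under PF 1 get 0x8 (port 3), PF 2 wraps back to port 2, and so on.
 */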
 166
167enum {
168 MAX_TXQ_ENTRIES = 16384,
169 MAX_CTRL_TXQ_ENTRIES = 1024,
170 MAX_RSPQ_ENTRIES = 16384,
171 MAX_RX_BUFFERS = 16384,
172 MIN_TXQ_ENTRIES = 32,
173 MIN_CTRL_TXQ_ENTRIES = 32,
174 MIN_RSPQ_ENTRIES = 128,
175 MIN_FL_ENTRIES = 16
176};
177
 178/* Host shadow copy of ingress filter entry. This is in host native format
 179 * and doesn't match the ordering or bit order, etc. of the hardware or the
180 * firmware command. The use of bit-field structure elements is purely to
181 * remind ourselves of the field size limitations and save memory in the case
182 * where the filter table is large.
183 */
184struct filter_entry {
185 /* Administrative fields for filter.
186 */
187 u32 valid:1; /* filter allocated and valid */
188 u32 locked:1; /* filter is administratively locked */
189
190 u32 pending:1; /* filter action is pending firmware reply */
191 u32 smtidx:8; /* Source MAC Table index for smac */
192 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
193
194 /* The filter itself. Most of this is a straight copy of information
195 * provided by the extended ioctl(). Some fields are translated to
196 * internal forms -- for instance the Ingress Queue ID passed in from
197 * the ioctl() is translated into the Absolute Ingress Queue ID.
198 */
199 struct ch_filter_specification fs;
200};
201
202#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
203 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
204 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
205
 206#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
207
208static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 209 CH_DEVICE(0xa000, 0), /* PE10K */
210 CH_DEVICE(0x4001, -1),
211 CH_DEVICE(0x4002, -1),
212 CH_DEVICE(0x4003, -1),
213 CH_DEVICE(0x4004, -1),
214 CH_DEVICE(0x4005, -1),
215 CH_DEVICE(0x4006, -1),
216 CH_DEVICE(0x4007, -1),
217 CH_DEVICE(0x4008, -1),
218 CH_DEVICE(0x4009, -1),
219 CH_DEVICE(0x400a, -1),
220 CH_DEVICE(0x4401, 4),
221 CH_DEVICE(0x4402, 4),
222 CH_DEVICE(0x4403, 4),
223 CH_DEVICE(0x4404, 4),
224 CH_DEVICE(0x4405, 4),
225 CH_DEVICE(0x4406, 4),
226 CH_DEVICE(0x4407, 4),
227 CH_DEVICE(0x4408, 4),
228 CH_DEVICE(0x4409, 4),
229 CH_DEVICE(0x440a, 4),
230 CH_DEVICE(0x440d, 4),
231 CH_DEVICE(0x440e, 4),
232 { 0, }
233};
234
235#define FW_FNAME "cxgb4/t4fw.bin"
 236#define FW_CFNAME "cxgb4/t4-config.txt"
237
238MODULE_DESCRIPTION(DRV_DESC);
239MODULE_AUTHOR("Chelsio Communications");
240MODULE_LICENSE("Dual BSD/GPL");
241MODULE_VERSION(DRV_VERSION);
242MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
243MODULE_FIRMWARE(FW_FNAME);
244
245/*
246 * Normally we're willing to become the firmware's Master PF but will be happy
247 * if another PF has already become the Master and initialized the adapter.
248 * Setting "force_init" will cause this driver to forcibly establish itself as
249 * the Master PF and initialize the adapter.
250 */
251static uint force_init;
252
253module_param(force_init, uint, 0644);
254MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
255
256/*
257 * Normally if the firmware we connect to has Configuration File support, we
258 * use that and only fall back to the old Driver-based initialization if the
259 * Configuration File fails for some reason. If force_old_init is set, then
260 * we'll always use the old Driver-based initialization sequence.
261 */
262static uint force_old_init;
263
264module_param(force_old_init, uint, 0644);
265MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
266
267static int dflt_msg_enable = DFLT_MSG_ENABLE;
268
269module_param(dflt_msg_enable, int, 0644);
270MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
271
272/*
273 * The driver uses the best interrupt scheme available on a platform in the
274 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
275 * of these schemes the driver may consider as follows:
276 *
277 * msi = 2: choose from among all three options
278 * msi = 1: only consider MSI and INTx interrupts
279 * msi = 0: force INTx interrupts
280 */
281static int msi = 2;
282
283module_param(msi, int, 0644);
284MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
285
286/*
287 * Queue interrupt hold-off timer values. Queues default to the first of these
288 * upon creation.
289 */
290static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
291
292module_param_array(intr_holdoff, uint, NULL, 0644);
293MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
294 "0..4 in microseconds");
295
296static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
297
298module_param_array(intr_cnt, uint, NULL, 0644);
299MODULE_PARM_DESC(intr_cnt,
300 "thresholds 1..3 for queue interrupt packet counters");
301
302/*
303 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
304 * offset by 2 bytes in order to have the IP headers line up on 4-byte
305 * boundaries. This is a requirement for many architectures which will throw
306 * a machine check fault if an attempt is made to access one of the 4-byte IP
307 * header fields on a non-4-byte boundary. And it's a major performance issue
308 * even on some architectures which allow it like some implementations of the
309 * x86 ISA. However, some architectures don't mind this and for some very
310 * edge-case performance sensitive applications (like forwarding large volumes
311 * of small packets), setting this DMA offset to 0 will decrease the number of
312 * PCI-E Bus transfers enough to measurably affect performance.
313 */
314static int rx_dma_offset = 2;
315
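/*
 * Worked example for the default of 2 (illustration only): an Ethernet
 * header is 14 bytes, so if the ingress DMA buffer starts on a 4-byte
 * boundary the IP header would begin at offset 14, which is not 4-byte
 * aligned.  Shifting the DMA by 2 bytes places the IP header at
 * 2 + 14 = 16, a 4-byte boundary -- the same idea as the stack's
 * NET_IP_ALIGN padding.
 */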
 316static bool vf_acls;
317
318#ifdef CONFIG_PCI_IOV
319module_param(vf_acls, bool, 0644);
320MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
321
322static unsigned int num_vf[4];
323
324module_param_array(num_vf, uint, NULL, 0644);
325MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
326#endif
327
328/*
329 * The filter TCAM has a fixed portion and a variable portion. The fixed
330 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
331 * ports. The variable portion is 36 bits which can include things like Exact
332 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
333 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
334 * far exceed the 36-bit budget for this "compressed" header portion of the
335 * filter. Thus, we have a scarce resource which must be carefully managed.
336 *
337 * By default we set this up to mostly match the set of filter matching
338 * capabilities of T3 but with accommodations for some of T4's more
339 * interesting features:
340 *
341 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
342 * [Inner] VLAN (17), Port (3), FCoE (1) }
343 */
344enum {
345 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
346 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
347 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
348};
349
350static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
351
352module_param(tp_vlan_pri_map, uint, 0644);
353MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
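/*
 * Rough bit budget of the default tuple above (illustration only, using
 * the field widths listed in the comment): 1 (IP Fragment) + 3 (MPS
 * Match Type) + 8 (IP Protocol) + 17 ([Inner] VLAN) + 3 (Port) +
 * 1 (FCoE) = 33 bits, which fits within the 36-bit compressed filter
 * budget with 3 bits to spare.
 */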
354
355static struct dentry *cxgb4_debugfs_root;
356
357static LIST_HEAD(adapter_list);
358static DEFINE_MUTEX(uld_mutex);
359static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
360static const char *uld_str[] = { "RDMA", "iSCSI" };
361
362static void link_report(struct net_device *dev)
363{
364 if (!netif_carrier_ok(dev))
365 netdev_info(dev, "link down\n");
366 else {
367 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
368
369 const char *s = "10Mbps";
370 const struct port_info *p = netdev_priv(dev);
371
372 switch (p->link_cfg.speed) {
373 case SPEED_10000:
374 s = "10Gbps";
375 break;
376 case SPEED_1000:
377 s = "1000Mbps";
378 break;
379 case SPEED_100:
380 s = "100Mbps";
381 break;
382 }
383
384 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
385 fc[p->link_cfg.fc]);
386 }
387}
388
389void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
390{
391 struct net_device *dev = adapter->port[port_id];
392
393 /* Skip changes from disabled ports. */
394 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
395 if (link_stat)
396 netif_carrier_on(dev);
397 else
398 netif_carrier_off(dev);
399
400 link_report(dev);
401 }
402}
403
404void t4_os_portmod_changed(const struct adapter *adap, int port_id)
405{
406 static const char *mod_str[] = {
 407 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
408 };
409
410 const struct net_device *dev = adap->port[port_id];
411 const struct port_info *pi = netdev_priv(dev);
412
413 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
414 netdev_info(dev, "port module unplugged\n");
 415 else if (pi->mod_type < ARRAY_SIZE(mod_str))
416 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
417}
418
419/*
420 * Configure the exact and hash address filters to handle a port's multicast
421 * and secondary unicast MAC addresses.
422 */
423static int set_addr_filters(const struct net_device *dev, bool sleep)
424{
425 u64 mhash = 0;
426 u64 uhash = 0;
427 bool free = true;
428 u16 filt_idx[7];
429 const u8 *addr[7];
430 int ret, naddr = 0;
431 const struct netdev_hw_addr *ha;
432 int uc_cnt = netdev_uc_count(dev);
 433 int mc_cnt = netdev_mc_count(dev);
 434 const struct port_info *pi = netdev_priv(dev);
 435 unsigned int mb = pi->adapter->fn;
436
437 /* first do the secondary unicast addresses */
438 netdev_for_each_uc_addr(ha, dev) {
439 addr[naddr++] = ha->addr;
440 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 441 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
442 naddr, addr, filt_idx, &uhash, sleep);
443 if (ret < 0)
444 return ret;
445
446 free = false;
447 naddr = 0;
448 }
449 }
450
451 /* next set up the multicast addresses */
452 netdev_for_each_mc_addr(ha, dev) {
453 addr[naddr++] = ha->addr;
454 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
 455 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
456 naddr, addr, filt_idx, &mhash, sleep);
457 if (ret < 0)
458 return ret;
459
460 free = false;
461 naddr = 0;
462 }
463 }
464
 465 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
466 uhash | mhash, sleep);
467}
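/*
 * Note on the batching above: addr[] and filt_idx[] hold at most seven
 * entries, so the secondary unicast and multicast lists are pushed to
 * the firmware in chunks of up to seven addresses per
 * t4_alloc_mac_filt() call.  "free" is true only for the first call so
 * that previously programmed exact-match filters are released exactly
 * once; addresses that cannot be given exact-match filters are folded
 * into the uhash/mhash bit vectors and programmed as hash filters by
 * the final t4_set_addr_hash() call.
 */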
468
469int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
470module_param(dbfifo_int_thresh, int, 0644);
471MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
472
473/*
474 * usecs to sleep while draining the dbfifo
475 */
476static int dbfifo_drain_delay = 1000;
477module_param(dbfifo_drain_delay, int, 0644);
478MODULE_PARM_DESC(dbfifo_drain_delay,
479 "usecs to sleep while draining the dbfifo");
480
 481/*
 482 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
483 * If @mtu is -1 it is left unchanged.
484 */
485static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
486{
487 int ret;
488 struct port_info *pi = netdev_priv(dev);
489
490 ret = set_addr_filters(dev, sleep_ok);
491 if (ret == 0)
 492 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
 493 (dev->flags & IFF_PROMISC) ? 1 : 0,
 494 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
495 sleep_ok);
496 return ret;
497}
498
499static struct workqueue_struct *workq;
500
501/**
502 * link_start - enable a port
503 * @dev: the port to enable
504 *
505 * Performs the MAC and PHY actions needed to enable a port.
506 */
507static int link_start(struct net_device *dev)
508{
509 int ret;
510 struct port_info *pi = netdev_priv(dev);
 511 unsigned int mb = pi->adapter->fn;
512
513 /*
514 * We do not set address filters and promiscuity here, the stack does
515 * that step explicitly.
516 */
 517 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
 518 !!(dev->features & NETIF_F_HW_VLAN_RX), true);
 519 if (ret == 0) {
 520 ret = t4_change_mac(pi->adapter, mb, pi->viid,
 521 pi->xact_addr_filt, dev->dev_addr, true,
 522 true);
523 if (ret >= 0) {
524 pi->xact_addr_filt = ret;
525 ret = 0;
526 }
527 }
528 if (ret == 0)
529 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
530 &pi->link_cfg);
 531 if (ret == 0)
 532 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
533 return ret;
534}
535
536/* Clear a filter and release any of its resources that we own. This also
537 * clears the filter's "pending" status.
538 */
539static void clear_filter(struct adapter *adap, struct filter_entry *f)
540{
 541 /* If the new or old filter has loopback rewriting rules then we'll
542 * need to free any existing Layer Two Table (L2T) entries of the old
543 * filter rule. The firmware will handle freeing up any Source MAC
544 * Table (SMT) entries used for rewriting Source MAC Addresses in
545 * loopback rules.
546 */
547 if (f->l2t)
548 cxgb4_l2t_release(f->l2t);
549
550 /* The zeroing of the filter rule below clears the filter valid,
551 * pending, locked flags, l2t pointer, etc. so it's all we need for
552 * this operation.
553 */
554 memset(f, 0, sizeof(*f));
555}
556
557/* Handle a filter write/deletion reply.
558 */
559static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
560{
561 unsigned int idx = GET_TID(rpl);
562 unsigned int nidx = idx - adap->tids.ftid_base;
563 unsigned int ret;
564 struct filter_entry *f;
565
566 if (idx >= adap->tids.ftid_base && nidx <
567 (adap->tids.nftids + adap->tids.nsftids)) {
568 idx = nidx;
569 ret = GET_TCB_COOKIE(rpl->cookie);
570 f = &adap->tids.ftid_tab[idx];
571
572 if (ret == FW_FILTER_WR_FLT_DELETED) {
573 /* Clear the filter when we get confirmation from the
574 * hardware that the filter has been deleted.
575 */
576 clear_filter(adap, f);
577 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
578 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
579 idx);
580 clear_filter(adap, f);
581 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
582 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
583 f->pending = 0; /* asynchronous setup completed */
584 f->valid = 1;
585 } else {
586 /* Something went wrong. Issue a warning about the
587 * problem and clear everything out.
588 */
589 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
590 idx, ret);
591 clear_filter(adap, f);
592 }
593 }
594}
595
596/* Response queue handler for the FW event queue.
597 */
598static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
599 const struct pkt_gl *gl)
600{
601 u8 opcode = ((const struct rss_header *)rsp)->opcode;
602
603 rsp++; /* skip RSS header */
604 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
605 const struct cpl_sge_egr_update *p = (void *)rsp;
606 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
 607 struct sge_txq *txq;
 608
 609 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
 610 txq->restarts++;
 611 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
612 struct sge_eth_txq *eq;
613
614 eq = container_of(txq, struct sge_eth_txq, q);
615 netif_tx_wake_queue(eq->txq);
616 } else {
617 struct sge_ofld_txq *oq;
618
619 oq = container_of(txq, struct sge_ofld_txq, q);
620 tasklet_schedule(&oq->qresume_tsk);
621 }
622 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
623 const struct cpl_fw6_msg *p = (void *)rsp;
624
625 if (p->type == 0)
626 t4_handle_fw_rpl(q->adap, p->data);
627 } else if (opcode == CPL_L2T_WRITE_RPL) {
628 const struct cpl_l2t_write_rpl *p = (void *)rsp;
629
630 do_l2t_write_rpl(q->adap, p);
631 } else if (opcode == CPL_SET_TCB_RPL) {
632 const struct cpl_set_tcb_rpl *p = (void *)rsp;
633
634 filter_rpl(q->adap, p);
635 } else
636 dev_err(q->adap->pdev_dev,
637 "unexpected CPL %#x on FW event queue\n", opcode);
638 return 0;
639}
640
641/**
642 * uldrx_handler - response queue handler for ULD queues
643 * @q: the response queue that received the packet
644 * @rsp: the response queue descriptor holding the offload message
645 * @gl: the gather list of packet fragments
646 *
647 * Deliver an ingress offload packet to a ULD. All processing is done by
 648 * the ULD; we just maintain statistics.
649 */
650static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
651 const struct pkt_gl *gl)
652{
653 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
654
655 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
656 rxq->stats.nomem++;
657 return -1;
658 }
659 if (gl == NULL)
660 rxq->stats.imm++;
661 else if (gl == CXGB4_MSG_AN)
662 rxq->stats.an++;
663 else
664 rxq->stats.pkts++;
665 return 0;
666}
667
668static void disable_msi(struct adapter *adapter)
669{
670 if (adapter->flags & USING_MSIX) {
671 pci_disable_msix(adapter->pdev);
672 adapter->flags &= ~USING_MSIX;
673 } else if (adapter->flags & USING_MSI) {
674 pci_disable_msi(adapter->pdev);
675 adapter->flags &= ~USING_MSI;
676 }
677}
678
679/*
680 * Interrupt handler for non-data events used with MSI-X.
681 */
682static irqreturn_t t4_nondata_intr(int irq, void *cookie)
683{
684 struct adapter *adap = cookie;
685
686 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
687 if (v & PFSW) {
688 adap->swintr = 1;
689 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
690 }
691 t4_slow_intr_handler(adap);
692 return IRQ_HANDLED;
693}
694
695/*
696 * Name the MSI-X interrupts.
697 */
698static void name_msix_vecs(struct adapter *adap)
699{
 700 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
701
702 /* non-data interrupts */
 703 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
704
705 /* FW events */
 706 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
 707 adap->port[0]->name);
708
709 /* Ethernet queues */
710 for_each_port(adap, j) {
711 struct net_device *d = adap->port[j];
712 const struct port_info *pi = netdev_priv(d);
713
 714 for (i = 0; i < pi->nqsets; i++, msi_idx++)
 715 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
 716 d->name, i);
717 }
718
719 /* offload queues */
 720 for_each_ofldrxq(&adap->sge, i)
 721 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
 722 adap->port[0]->name, i);
 723
 724 for_each_rdmarxq(&adap->sge, i)
 725 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
 726 adap->port[0]->name, i);
727}
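/*
 * Example of the names generated above, assuming the first port is
 * "eth0" and each port has two queue sets (illustration only):
 *
 *	msix_info[0]: "eth0"		non-data interrupts
 *	msix_info[1]: "eth0-FWeventq"	firmware event queue
 *	msix_info[2]: "eth0-Rx0", [3]: "eth0-Rx1", then the other ports'
 *	Rx queues, followed by "eth0-ofld0", ... and "eth0-rdma0", ...
 */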
728
729static int request_msix_queue_irqs(struct adapter *adap)
730{
731 struct sge *s = &adap->sge;
 732 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
733
734 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
735 adap->msix_info[1].desc, &s->fw_evtq);
736 if (err)
737 return err;
738
739 for_each_ethrxq(s, ethqidx) {
 740 err = request_irq(adap->msix_info[msi_index].vec,
 741 t4_sge_intr_msix, 0,
 742 adap->msix_info[msi_index].desc,
743 &s->ethrxq[ethqidx].rspq);
744 if (err)
745 goto unwind;
 746 msi_index++;
747 }
748 for_each_ofldrxq(s, ofldqidx) {
 749 err = request_irq(adap->msix_info[msi_index].vec,
 750 t4_sge_intr_msix, 0,
 751 adap->msix_info[msi_index].desc,
752 &s->ofldrxq[ofldqidx].rspq);
753 if (err)
754 goto unwind;
 755 msi_index++;
756 }
757 for_each_rdmarxq(s, rdmaqidx) {
 758 err = request_irq(adap->msix_info[msi_index].vec,
 759 t4_sge_intr_msix, 0,
 760 adap->msix_info[msi_index].desc,
761 &s->rdmarxq[rdmaqidx].rspq);
762 if (err)
763 goto unwind;
 764 msi_index++;
765 }
766 return 0;
767
768unwind:
769 while (--rdmaqidx >= 0)
 770 free_irq(adap->msix_info[--msi_index].vec,
771 &s->rdmarxq[rdmaqidx].rspq);
772 while (--ofldqidx >= 0)
 773 free_irq(adap->msix_info[--msi_index].vec,
774 &s->ofldrxq[ofldqidx].rspq);
775 while (--ethqidx >= 0)
 776 free_irq(adap->msix_info[--msi_index].vec,
 777 &s->ethrxq[ethqidx].rspq);
778 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
779 return err;
780}
781
782static void free_msix_queue_irqs(struct adapter *adap)
783{
 784 int i, msi_index = 2;
785 struct sge *s = &adap->sge;
786
787 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
788 for_each_ethrxq(s, i)
 789 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
 790 for_each_ofldrxq(s, i)
 791 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
 792 for_each_rdmarxq(s, i)
 793 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
794}
795
796/**
797 * write_rss - write the RSS table for a given port
798 * @pi: the port
799 * @queues: array of queue indices for RSS
800 *
801 * Sets up the portion of the HW RSS table for the port's VI to distribute
802 * packets to the Rx queues in @queues.
803 */
804static int write_rss(const struct port_info *pi, const u16 *queues)
805{
806 u16 *rss;
807 int i, err;
808 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
809
810 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
811 if (!rss)
812 return -ENOMEM;
813
814 /* map the queue indices to queue ids */
815 for (i = 0; i < pi->rss_size; i++, queues++)
816 rss[i] = q[*queues].rspq.abs_id;
817
 818 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
 819 pi->rss_size, rss, pi->rss_size);
820 kfree(rss);
821 return err;
822}
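/*
 * Illustration of the mapping above: with pi->rss_size == 4 and
 * queues[] == { 0, 1, 2, 3 }, rss[] ends up holding the absolute
 * response-queue IDs of the port's first four Ethernet Rx queues, and
 * t4_config_rss_range() writes those IDs into the slice of the hardware
 * RSS table owned by this virtual interface.
 */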
823
824/**
825 * setup_rss - configure RSS
826 * @adap: the adapter
827 *
 828 * Sets up RSS for each port.
829 */
830static int setup_rss(struct adapter *adap)
831{
 832 int i, err;
833
834 for_each_port(adap, i) {
835 const struct port_info *pi = adap2pinfo(adap, i);
 836
 837 err = write_rss(pi, pi->rss);
838 if (err)
839 return err;
840 }
841 return 0;
842}
843
844/*
845 * Return the channel of the ingress queue with the given qid.
846 */
847static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
848{
849 qid -= p->ingr_start;
850 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
851}
852
853/*
854 * Wait until all NAPI handlers are descheduled.
855 */
856static void quiesce_rx(struct adapter *adap)
857{
858 int i;
859
860 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
861 struct sge_rspq *q = adap->sge.ingr_map[i];
862
863 if (q && q->handler)
864 napi_disable(&q->napi);
865 }
866}
867
868/*
869 * Enable NAPI scheduling and interrupt generation for all Rx queues.
870 */
871static void enable_rx(struct adapter *adap)
872{
873 int i;
874
875 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
876 struct sge_rspq *q = adap->sge.ingr_map[i];
877
878 if (!q)
879 continue;
880 if (q->handler)
881 napi_enable(&q->napi);
882 /* 0-increment GTS to start the timer and enable interrupts */
883 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
884 SEINTARM(q->intr_params) |
885 INGRESSQID(q->cntxt_id));
886 }
887}
888
889/**
890 * setup_sge_queues - configure SGE Tx/Rx/response queues
891 * @adap: the adapter
892 *
893 * Determines how many sets of SGE queues to use and initializes them.
894 * We support multiple queue sets per port if we have MSI-X, otherwise
895 * just one queue set per port.
896 */
897static int setup_sge_queues(struct adapter *adap)
898{
899 int err, msi_idx, i, j;
900 struct sge *s = &adap->sge;
901
902 bitmap_zero(s->starving_fl, MAX_EGRQ);
903 bitmap_zero(s->txq_maperr, MAX_EGRQ);
904
905 if (adap->flags & USING_MSIX)
906 msi_idx = 1; /* vector 0 is for non-queue interrupts */
907 else {
908 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
909 NULL, NULL);
910 if (err)
911 return err;
912 msi_idx = -((int)s->intrq.abs_id + 1);
913 }
914
915 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
916 msi_idx, NULL, fwevtq_handler);
917 if (err) {
918freeout: t4_free_sge_resources(adap);
919 return err;
920 }
921
922 for_each_port(adap, i) {
923 struct net_device *dev = adap->port[i];
924 struct port_info *pi = netdev_priv(dev);
925 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
926 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
927
928 for (j = 0; j < pi->nqsets; j++, q++) {
929 if (msi_idx > 0)
930 msi_idx++;
931 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
932 msi_idx, &q->fl,
933 t4_ethrx_handler);
934 if (err)
935 goto freeout;
936 q->rspq.idx = j;
937 memset(&q->stats, 0, sizeof(q->stats));
938 }
939 for (j = 0; j < pi->nqsets; j++, t++) {
940 err = t4_sge_alloc_eth_txq(adap, t, dev,
941 netdev_get_tx_queue(dev, j),
942 s->fw_evtq.cntxt_id);
943 if (err)
944 goto freeout;
945 }
946 }
947
948 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
949 for_each_ofldrxq(s, i) {
950 struct sge_ofld_rxq *q = &s->ofldrxq[i];
951 struct net_device *dev = adap->port[i / j];
952
953 if (msi_idx > 0)
954 msi_idx++;
955 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
956 &q->fl, uldrx_handler);
957 if (err)
958 goto freeout;
959 memset(&q->stats, 0, sizeof(q->stats));
960 s->ofld_rxq[i] = q->rspq.abs_id;
961 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
962 s->fw_evtq.cntxt_id);
963 if (err)
964 goto freeout;
965 }
966
967 for_each_rdmarxq(s, i) {
968 struct sge_ofld_rxq *q = &s->rdmarxq[i];
969
970 if (msi_idx > 0)
971 msi_idx++;
972 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
973 msi_idx, &q->fl, uldrx_handler);
974 if (err)
975 goto freeout;
976 memset(&q->stats, 0, sizeof(q->stats));
977 s->rdma_rxq[i] = q->rspq.abs_id;
978 }
979
980 for_each_port(adap, i) {
981 /*
982 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
983 * have RDMA queues, and that's the right value.
984 */
985 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
986 s->fw_evtq.cntxt_id,
987 s->rdmarxq[i].rspq.cntxt_id);
988 if (err)
989 goto freeout;
990 }
991
992 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
993 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
994 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
995 return 0;
996}
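/*
 * Vector accounting note for the function above: with MSI-X, msi_idx
 * starts at 1 (vector 0 is reserved for non-queue interrupts), the
 * firmware event queue uses vector 1, and every Ethernet, offload and
 * RDMA Rx queue bumps msi_idx before being allocated, so data queues
 * start at vector 2.  This matches the naming in name_msix_vecs() and
 * the request_irq() calls in request_msix_queue_irqs().
 */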
997
998/*
999 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
1000 * started but failed, and a negative errno if flash load couldn't start.
1001 */
1002static int upgrade_fw(struct adapter *adap)
1003{
1004 int ret;
1005 u32 vers;
1006 const struct fw_hdr *hdr;
1007 const struct firmware *fw;
1008 struct device *dev = adap->pdev_dev;
1009
1010 ret = request_firmware(&fw, FW_FNAME, dev);
1011 if (ret < 0) {
1012 dev_err(dev, "unable to load firmware image " FW_FNAME
1013 ", error %d\n", ret);
1014 return ret;
1015 }
1016
1017 hdr = (const struct fw_hdr *)fw->data;
1018 vers = ntohl(hdr->fw_ver);
1019 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
1020 ret = -EINVAL; /* wrong major version, won't do */
1021 goto out;
1022 }
1023
1024 /*
1025 * If the flash FW is unusable or we found something newer, load it.
1026 */
1027 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
1028 vers > adap->params.fw_vers) {
1029 dev_info(dev, "upgrading firmware ...\n");
1030 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1031 /*force=*/false);
 1032 if (!ret)
1033 dev_info(dev, "firmware successfully upgraded to "
1034 FW_FNAME " (%d.%d.%d.%d)\n",
1035 FW_HDR_FW_VER_MAJOR_GET(vers),
1036 FW_HDR_FW_VER_MINOR_GET(vers),
1037 FW_HDR_FW_VER_MICRO_GET(vers),
1038 FW_HDR_FW_VER_BUILD_GET(vers));
1039 else
1040 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1041 } else {
1042 /*
1043 * Tell our caller that we didn't upgrade the firmware.
1044 */
1045 ret = -EINVAL;
 1046 }
 1047
1048out: release_firmware(fw);
1049 return ret;
1050}
1051
1052/*
1053 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1054 * The allocated memory is cleared.
1055 */
1056void *t4_alloc_mem(size_t size)
1057{
 1058 void *p = kzalloc(size, GFP_KERNEL);
1059
1060 if (!p)
 1061 p = vzalloc(size);
1062 return p;
1063}
1064
1065/*
1066 * Free memory allocated through alloc_mem().
1067 */
 1068static void t4_free_mem(void *addr)
1069{
1070 if (is_vmalloc_addr(addr))
1071 vfree(addr);
1072 else
1073 kfree(addr);
1074}
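/*
 * Minimal usage sketch for the two helpers above (not built; "nentries"
 * and "tab" are placeholders): memory from t4_alloc_mem() may come from
 * either kzalloc() or vzalloc(), so it must always be released with
 * t4_free_mem(), which picks the matching free routine.
 */
#if 0
	{
		struct l2t_entry *tab;

		tab = t4_alloc_mem(nentries * sizeof(*tab));	/* zeroed */
		if (!tab)
			return -ENOMEM;
		/* ... use tab ... */
		t4_free_mem(tab);	/* correct for both kzalloc and vzalloc */
	}
#endif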
1075
1076/* Send a Work Request to write the filter at a specified index. We construct
1077 * a Firmware Filter Work Request to have the work done and put the indicated
1078 * filter into "pending" mode which will prevent any further actions against
1079 * it till we get a reply from the firmware on the completion status of the
1080 * request.
1081 */
1082static int set_filter_wr(struct adapter *adapter, int fidx)
1083{
1084 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1085 struct sk_buff *skb;
1086 struct fw_filter_wr *fwr;
1087 unsigned int ftid;
1088
1089 /* If the new filter requires loopback Destination MAC and/or VLAN
1090 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1091 * the filter.
1092 */
1093 if (f->fs.newdmac || f->fs.newvlan) {
1094 /* allocate L2T entry for new filter */
1095 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1096 if (f->l2t == NULL)
1097 return -EAGAIN;
1098 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1099 f->fs.eport, f->fs.dmac)) {
1100 cxgb4_l2t_release(f->l2t);
1101 f->l2t = NULL;
1102 return -ENOMEM;
1103 }
1104 }
1105
1106 ftid = adapter->tids.ftid_base + fidx;
1107
1108 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1109 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1110 memset(fwr, 0, sizeof(*fwr));
1111
1112 /* It would be nice to put most of the following in t4_hw.c but most
1113 * of the work is translating the cxgbtool ch_filter_specification
1114 * into the Work Request and the definition of that structure is
1115 * currently in cxgbtool.h which isn't appropriate to pull into the
1116 * common code. We may eventually try to come up with a more neutral
1117 * filter specification structure but for now it's easiest to simply
1118 * put this fairly direct code in line ...
1119 */
1120 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1121 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1122 fwr->tid_to_iq =
1123 htonl(V_FW_FILTER_WR_TID(ftid) |
1124 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1125 V_FW_FILTER_WR_NOREPLY(0) |
1126 V_FW_FILTER_WR_IQ(f->fs.iq));
1127 fwr->del_filter_to_l2tix =
1128 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1129 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1130 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1131 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1132 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1133 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1134 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1135 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1136 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1137 f->fs.newvlan == VLAN_REWRITE) |
1138 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1139 f->fs.newvlan == VLAN_REWRITE) |
1140 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1141 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1142 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1143 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1144 fwr->ethtype = htons(f->fs.val.ethtype);
1145 fwr->ethtypem = htons(f->fs.mask.ethtype);
1146 fwr->frag_to_ovlan_vldm =
1147 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1148 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1149 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1150 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1151 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1152 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1153 fwr->smac_sel = 0;
1154 fwr->rx_chan_rx_rpl_iq =
1155 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1156 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1157 fwr->maci_to_matchtypem =
1158 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1159 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1160 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1161 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1162 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1163 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1164 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1165 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1166 fwr->ptcl = f->fs.val.proto;
1167 fwr->ptclm = f->fs.mask.proto;
1168 fwr->ttyp = f->fs.val.tos;
1169 fwr->ttypm = f->fs.mask.tos;
1170 fwr->ivlan = htons(f->fs.val.ivlan);
1171 fwr->ivlanm = htons(f->fs.mask.ivlan);
1172 fwr->ovlan = htons(f->fs.val.ovlan);
1173 fwr->ovlanm = htons(f->fs.mask.ovlan);
1174 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1175 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1176 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1177 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1178 fwr->lp = htons(f->fs.val.lport);
1179 fwr->lpm = htons(f->fs.mask.lport);
1180 fwr->fp = htons(f->fs.val.fport);
1181 fwr->fpm = htons(f->fs.mask.fport);
1182 if (f->fs.newsmac)
1183 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1184
1185 /* Mark the filter as "pending" and ship off the Filter Work Request.
1186 * When we get the Work Request Reply we'll clear the pending status.
1187 */
1188 f->pending = 1;
1189 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1190 t4_ofld_send(adapter, skb);
1191 return 0;
1192}
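/*
 * Sketch of the asynchronous flow around set_filter_wr() (not driver
 * code; "fs" and "fidx" are placeholders): the caller fills in the
 * filter's ch_filter_specification, issues the work request, and must
 * then wait for the CPL_SET_TCB_RPL handled by filter_rpl() before
 * trusting the entry's "valid" flag.
 */
#if 0
	{
		struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
		int ret;

		f->fs = *fs;				/* caller-supplied spec */
		ret = set_filter_wr(adapter, fidx);
		if (ret)
			return ret;
		/*
		 * f->pending is now 1; the reply arrives later on the FW
		 * event queue, where filter_rpl() either sets f->valid or
		 * clears the entry on failure.
		 */
	}
#endif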
1193
1194/* Delete the filter at a specified index.
1195 */
1196static int del_filter_wr(struct adapter *adapter, int fidx)
1197{
1198 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1199 struct sk_buff *skb;
1200 struct fw_filter_wr *fwr;
1201 unsigned int len, ftid;
1202
1203 len = sizeof(*fwr);
1204 ftid = adapter->tids.ftid_base + fidx;
1205
1206 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1207 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1208 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1209
1210 /* Mark the filter as "pending" and ship off the Filter Work Request.
1211 * When we get the Work Request Reply we'll clear the pending status.
1212 */
1213 f->pending = 1;
1214 t4_mgmt_tx(adapter, skb);
1215 return 0;
1216}
1217
1218static inline int is_offload(const struct adapter *adap)
1219{
1220 return adap->params.offload;
1221}
1222
1223/*
1224 * Implementation of ethtool operations.
1225 */
1226
1227static u32 get_msglevel(struct net_device *dev)
1228{
1229 return netdev2adap(dev)->msg_enable;
1230}
1231
1232static void set_msglevel(struct net_device *dev, u32 val)
1233{
1234 netdev2adap(dev)->msg_enable = val;
1235}
1236
1237static char stats_strings[][ETH_GSTRING_LEN] = {
1238 "TxOctetsOK ",
1239 "TxFramesOK ",
1240 "TxBroadcastFrames ",
1241 "TxMulticastFrames ",
1242 "TxUnicastFrames ",
1243 "TxErrorFrames ",
1244
1245 "TxFrames64 ",
1246 "TxFrames65To127 ",
1247 "TxFrames128To255 ",
1248 "TxFrames256To511 ",
1249 "TxFrames512To1023 ",
1250 "TxFrames1024To1518 ",
1251 "TxFrames1519ToMax ",
1252
1253 "TxFramesDropped ",
1254 "TxPauseFrames ",
1255 "TxPPP0Frames ",
1256 "TxPPP1Frames ",
1257 "TxPPP2Frames ",
1258 "TxPPP3Frames ",
1259 "TxPPP4Frames ",
1260 "TxPPP5Frames ",
1261 "TxPPP6Frames ",
1262 "TxPPP7Frames ",
1263
1264 "RxOctetsOK ",
1265 "RxFramesOK ",
1266 "RxBroadcastFrames ",
1267 "RxMulticastFrames ",
1268 "RxUnicastFrames ",
1269
1270 "RxFramesTooLong ",
1271 "RxJabberErrors ",
1272 "RxFCSErrors ",
1273 "RxLengthErrors ",
1274 "RxSymbolErrors ",
1275 "RxRuntFrames ",
1276
1277 "RxFrames64 ",
1278 "RxFrames65To127 ",
1279 "RxFrames128To255 ",
1280 "RxFrames256To511 ",
1281 "RxFrames512To1023 ",
1282 "RxFrames1024To1518 ",
1283 "RxFrames1519ToMax ",
1284
1285 "RxPauseFrames ",
1286 "RxPPP0Frames ",
1287 "RxPPP1Frames ",
1288 "RxPPP2Frames ",
1289 "RxPPP3Frames ",
1290 "RxPPP4Frames ",
1291 "RxPPP5Frames ",
1292 "RxPPP6Frames ",
1293 "RxPPP7Frames ",
1294
1295 "RxBG0FramesDropped ",
1296 "RxBG1FramesDropped ",
1297 "RxBG2FramesDropped ",
1298 "RxBG3FramesDropped ",
1299 "RxBG0FramesTrunc ",
1300 "RxBG1FramesTrunc ",
1301 "RxBG2FramesTrunc ",
1302 "RxBG3FramesTrunc ",
1303
1304 "TSO ",
1305 "TxCsumOffload ",
1306 "RxCsumGood ",
1307 "VLANextractions ",
1308 "VLANinsertions ",
 1309 "GROpackets ",
 1310 "GROmerged ",
1311};
1312
1313static int get_sset_count(struct net_device *dev, int sset)
1314{
1315 switch (sset) {
1316 case ETH_SS_STATS:
1317 return ARRAY_SIZE(stats_strings);
1318 default:
1319 return -EOPNOTSUPP;
1320 }
1321}
1322
1323#define T4_REGMAP_SIZE (160 * 1024)
1324
1325static int get_regs_len(struct net_device *dev)
1326{
1327 return T4_REGMAP_SIZE;
1328}
1329
1330static int get_eeprom_len(struct net_device *dev)
1331{
1332 return EEPROMSIZE;
1333}
1334
1335static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1336{
1337 struct adapter *adapter = netdev2adap(dev);
1338
1339 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1340 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1341 strlcpy(info->bus_info, pci_name(adapter->pdev),
1342 sizeof(info->bus_info));
 1343
 1344 if (adapter->params.fw_vers)
1345 snprintf(info->fw_version, sizeof(info->fw_version),
1346 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1347 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1348 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1349 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1350 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1351 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1352 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1353 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1354 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1355}
1356
1357static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1358{
1359 if (stringset == ETH_SS_STATS)
1360 memcpy(data, stats_strings, sizeof(stats_strings));
1361}
1362
1363/*
1364 * port stats maintained per queue of the port. They should be in the same
1365 * order as in stats_strings above.
1366 */
1367struct queue_port_stats {
1368 u64 tso;
1369 u64 tx_csum;
1370 u64 rx_csum;
1371 u64 vlan_ex;
1372 u64 vlan_ins;
 1373 u64 gro_pkts;
 1374 u64 gro_merged;
1375};
1376
1377static void collect_sge_port_stats(const struct adapter *adap,
1378 const struct port_info *p, struct queue_port_stats *s)
1379{
1380 int i;
1381 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1382 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1383
1384 memset(s, 0, sizeof(*s));
1385 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1386 s->tso += tx->tso;
1387 s->tx_csum += tx->tx_cso;
1388 s->rx_csum += rx->stats.rx_cso;
1389 s->vlan_ex += rx->stats.vlan_ex;
1390 s->vlan_ins += tx->vlan_ins;
 1391 s->gro_pkts += rx->stats.lro_pkts;
 1392 s->gro_merged += rx->stats.lro_merged;
1393 }
1394}
1395
1396static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1397 u64 *data)
1398{
1399 struct port_info *pi = netdev_priv(dev);
1400 struct adapter *adapter = pi->adapter;
1401
1402 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1403
1404 data += sizeof(struct port_stats) / sizeof(u64);
1405 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1406}
1407
1408/*
1409 * Return a version number to identify the type of adapter. The scheme is:
1410 * - bits 0..9: chip version
1411 * - bits 10..15: chip revision
 1412 * - bits 16..23: register dump version
1413 */
1414static inline unsigned int mk_adap_vers(const struct adapter *ap)
1415{
 1416 return 4 | (ap->params.rev << 10) | (1 << 16);
1417}
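/*
 * Worked example of the encoding above: for a T4 rev 0 part the value
 * is 4 | (0 << 10) | (1 << 16) = 0x10004 -- chip version 4 in bits
 * 0..9, chip revision 0 in bits 10..15 and register dump version 1 in
 * bits 16..23.
 */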
1418
1419static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1420 unsigned int end)
1421{
1422 u32 *p = buf + start;
1423
1424 for ( ; start <= end; start += sizeof(u32))
1425 *p++ = t4_read_reg(ap, start);
1426}
1427
1428static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1429 void *buf)
1430{
1431 static const unsigned int reg_ranges[] = {
1432 0x1008, 0x1108,
1433 0x1180, 0x11b4,
1434 0x11fc, 0x123c,
1435 0x1300, 0x173c,
1436 0x1800, 0x18fc,
1437 0x3000, 0x30d8,
1438 0x30e0, 0x5924,
1439 0x5960, 0x59d4,
1440 0x5a00, 0x5af8,
1441 0x6000, 0x6098,
1442 0x6100, 0x6150,
1443 0x6200, 0x6208,
1444 0x6240, 0x6248,
1445 0x6280, 0x6338,
1446 0x6370, 0x638c,
1447 0x6400, 0x643c,
1448 0x6500, 0x6524,
1449 0x6a00, 0x6a38,
1450 0x6a60, 0x6a78,
1451 0x6b00, 0x6b84,
1452 0x6bf0, 0x6c84,
1453 0x6cf0, 0x6d84,
1454 0x6df0, 0x6e84,
1455 0x6ef0, 0x6f84,
1456 0x6ff0, 0x7084,
1457 0x70f0, 0x7184,
1458 0x71f0, 0x7284,
1459 0x72f0, 0x7384,
1460 0x73f0, 0x7450,
1461 0x7500, 0x7530,
1462 0x7600, 0x761c,
1463 0x7680, 0x76cc,
1464 0x7700, 0x7798,
1465 0x77c0, 0x77fc,
1466 0x7900, 0x79fc,
1467 0x7b00, 0x7c38,
1468 0x7d00, 0x7efc,
1469 0x8dc0, 0x8e1c,
1470 0x8e30, 0x8e78,
1471 0x8ea0, 0x8f6c,
1472 0x8fc0, 0x9074,
1473 0x90fc, 0x90fc,
1474 0x9400, 0x9458,
1475 0x9600, 0x96bc,
1476 0x9800, 0x9808,
1477 0x9820, 0x983c,
1478 0x9850, 0x9864,
1479 0x9c00, 0x9c6c,
1480 0x9c80, 0x9cec,
1481 0x9d00, 0x9d6c,
1482 0x9d80, 0x9dec,
1483 0x9e00, 0x9e6c,
1484 0x9e80, 0x9eec,
1485 0x9f00, 0x9f6c,
1486 0x9f80, 0x9fec,
1487 0xd004, 0xd03c,
1488 0xdfc0, 0xdfe0,
1489 0xe000, 0xea7c,
1490 0xf000, 0x11190,
 1491 0x19040, 0x1906c,
 1492 0x19078, 0x19080,
 1493 0x1908c, 0x19124,
1494 0x19150, 0x191b0,
1495 0x191d0, 0x191e8,
1496 0x19238, 0x1924c,
1497 0x193f8, 0x19474,
1498 0x19490, 0x194f8,
1499 0x19800, 0x19f30,
1500 0x1a000, 0x1a06c,
1501 0x1a0b0, 0x1a120,
1502 0x1a128, 0x1a138,
1503 0x1a190, 0x1a1c4,
1504 0x1a1fc, 0x1a1fc,
1505 0x1e040, 0x1e04c,
 1506 0x1e284, 0x1e28c,
1507 0x1e2c0, 0x1e2c0,
1508 0x1e2e0, 0x1e2e0,
1509 0x1e300, 0x1e384,
1510 0x1e3c0, 0x1e3c8,
1511 0x1e440, 0x1e44c,
 1512 0x1e684, 0x1e68c,
1513 0x1e6c0, 0x1e6c0,
1514 0x1e6e0, 0x1e6e0,
1515 0x1e700, 0x1e784,
1516 0x1e7c0, 0x1e7c8,
1517 0x1e840, 0x1e84c,
 1518 0x1ea84, 0x1ea8c,
1519 0x1eac0, 0x1eac0,
1520 0x1eae0, 0x1eae0,
1521 0x1eb00, 0x1eb84,
1522 0x1ebc0, 0x1ebc8,
1523 0x1ec40, 0x1ec4c,
 1524 0x1ee84, 0x1ee8c,
1525 0x1eec0, 0x1eec0,
1526 0x1eee0, 0x1eee0,
1527 0x1ef00, 0x1ef84,
1528 0x1efc0, 0x1efc8,
1529 0x1f040, 0x1f04c,
 1530 0x1f284, 0x1f28c,
1531 0x1f2c0, 0x1f2c0,
1532 0x1f2e0, 0x1f2e0,
1533 0x1f300, 0x1f384,
1534 0x1f3c0, 0x1f3c8,
1535 0x1f440, 0x1f44c,
 1536 0x1f684, 0x1f68c,
1537 0x1f6c0, 0x1f6c0,
1538 0x1f6e0, 0x1f6e0,
1539 0x1f700, 0x1f784,
1540 0x1f7c0, 0x1f7c8,
1541 0x1f840, 0x1f84c,
 1542 0x1fa84, 0x1fa8c,
1543 0x1fac0, 0x1fac0,
1544 0x1fae0, 0x1fae0,
1545 0x1fb00, 0x1fb84,
1546 0x1fbc0, 0x1fbc8,
1547 0x1fc40, 0x1fc4c,
 1548 0x1fe84, 0x1fe8c,
1549 0x1fec0, 0x1fec0,
1550 0x1fee0, 0x1fee0,
1551 0x1ff00, 0x1ff84,
1552 0x1ffc0, 0x1ffc8,
1553 0x20000, 0x2002c,
1554 0x20100, 0x2013c,
1555 0x20190, 0x201c8,
1556 0x20200, 0x20318,
1557 0x20400, 0x20528,
1558 0x20540, 0x20614,
1559 0x21000, 0x21040,
1560 0x2104c, 0x21060,
1561 0x210c0, 0x210ec,
1562 0x21200, 0x21268,
1563 0x21270, 0x21284,
1564 0x212fc, 0x21388,
1565 0x21400, 0x21404,
1566 0x21500, 0x21518,
1567 0x2152c, 0x2153c,
1568 0x21550, 0x21554,
1569 0x21600, 0x21600,
1570 0x21608, 0x21628,
1571 0x21630, 0x2163c,
1572 0x21700, 0x2171c,
1573 0x21780, 0x2178c,
1574 0x21800, 0x21c38,
1575 0x21c80, 0x21d7c,
1576 0x21e00, 0x21e04,
1577 0x22000, 0x2202c,
1578 0x22100, 0x2213c,
1579 0x22190, 0x221c8,
1580 0x22200, 0x22318,
1581 0x22400, 0x22528,
1582 0x22540, 0x22614,
1583 0x23000, 0x23040,
1584 0x2304c, 0x23060,
1585 0x230c0, 0x230ec,
1586 0x23200, 0x23268,
1587 0x23270, 0x23284,
1588 0x232fc, 0x23388,
1589 0x23400, 0x23404,
1590 0x23500, 0x23518,
1591 0x2352c, 0x2353c,
1592 0x23550, 0x23554,
1593 0x23600, 0x23600,
1594 0x23608, 0x23628,
1595 0x23630, 0x2363c,
1596 0x23700, 0x2371c,
1597 0x23780, 0x2378c,
1598 0x23800, 0x23c38,
1599 0x23c80, 0x23d7c,
1600 0x23e00, 0x23e04,
1601 0x24000, 0x2402c,
1602 0x24100, 0x2413c,
1603 0x24190, 0x241c8,
1604 0x24200, 0x24318,
1605 0x24400, 0x24528,
1606 0x24540, 0x24614,
1607 0x25000, 0x25040,
1608 0x2504c, 0x25060,
1609 0x250c0, 0x250ec,
1610 0x25200, 0x25268,
1611 0x25270, 0x25284,
1612 0x252fc, 0x25388,
1613 0x25400, 0x25404,
1614 0x25500, 0x25518,
1615 0x2552c, 0x2553c,
1616 0x25550, 0x25554,
1617 0x25600, 0x25600,
1618 0x25608, 0x25628,
1619 0x25630, 0x2563c,
1620 0x25700, 0x2571c,
1621 0x25780, 0x2578c,
1622 0x25800, 0x25c38,
1623 0x25c80, 0x25d7c,
1624 0x25e00, 0x25e04,
1625 0x26000, 0x2602c,
1626 0x26100, 0x2613c,
1627 0x26190, 0x261c8,
1628 0x26200, 0x26318,
1629 0x26400, 0x26528,
1630 0x26540, 0x26614,
1631 0x27000, 0x27040,
1632 0x2704c, 0x27060,
1633 0x270c0, 0x270ec,
1634 0x27200, 0x27268,
1635 0x27270, 0x27284,
1636 0x272fc, 0x27388,
1637 0x27400, 0x27404,
1638 0x27500, 0x27518,
1639 0x2752c, 0x2753c,
1640 0x27550, 0x27554,
1641 0x27600, 0x27600,
1642 0x27608, 0x27628,
1643 0x27630, 0x2763c,
1644 0x27700, 0x2771c,
1645 0x27780, 0x2778c,
1646 0x27800, 0x27c38,
1647 0x27c80, 0x27d7c,
1648 0x27e00, 0x27e04
1649 };
1650
1651 int i;
1652 struct adapter *ap = netdev2adap(dev);
1653
1654 regs->version = mk_adap_vers(ap);
1655
1656 memset(buf, 0, T4_REGMAP_SIZE);
1657 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1658 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1659}
1660
1661static int restart_autoneg(struct net_device *dev)
1662{
1663 struct port_info *p = netdev_priv(dev);
1664
1665 if (!netif_running(dev))
1666 return -EAGAIN;
1667 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1668 return -EINVAL;
 1669 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1670 return 0;
1671}
1672
1673static int identify_port(struct net_device *dev,
1674 enum ethtool_phys_id_state state)
 1675{
 1676 unsigned int val;
1677 struct adapter *adap = netdev2adap(dev);
1678
1679 if (state == ETHTOOL_ID_ACTIVE)
1680 val = 0xffff;
1681 else if (state == ETHTOOL_ID_INACTIVE)
1682 val = 0;
1683 else
1684 return -EINVAL;
 1685
 1686 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1687}
1688
1689static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1690{
1691 unsigned int v = 0;
1692
1693 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1694 type == FW_PORT_TYPE_BT_XAUI) {
1695 v |= SUPPORTED_TP;
1696 if (caps & FW_PORT_CAP_SPEED_100M)
1697 v |= SUPPORTED_100baseT_Full;
1698 if (caps & FW_PORT_CAP_SPEED_1G)
1699 v |= SUPPORTED_1000baseT_Full;
1700 if (caps & FW_PORT_CAP_SPEED_10G)
1701 v |= SUPPORTED_10000baseT_Full;
1702 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1703 v |= SUPPORTED_Backplane;
1704 if (caps & FW_PORT_CAP_SPEED_1G)
1705 v |= SUPPORTED_1000baseKX_Full;
1706 if (caps & FW_PORT_CAP_SPEED_10G)
1707 v |= SUPPORTED_10000baseKX4_Full;
1708 } else if (type == FW_PORT_TYPE_KR)
1709 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
 1710 else if (type == FW_PORT_TYPE_BP_AP)
1711 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1712 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1713 else if (type == FW_PORT_TYPE_BP4_AP)
1714 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1715 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1716 SUPPORTED_10000baseKX4_Full;
1717 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1718 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1719 v |= SUPPORTED_FIBRE;
1720
1721 if (caps & FW_PORT_CAP_ANEG)
1722 v |= SUPPORTED_Autoneg;
1723 return v;
1724}
1725
1726static unsigned int to_fw_linkcaps(unsigned int caps)
1727{
1728 unsigned int v = 0;
1729
1730 if (caps & ADVERTISED_100baseT_Full)
1731 v |= FW_PORT_CAP_SPEED_100M;
1732 if (caps & ADVERTISED_1000baseT_Full)
1733 v |= FW_PORT_CAP_SPEED_1G;
1734 if (caps & ADVERTISED_10000baseT_Full)
1735 v |= FW_PORT_CAP_SPEED_10G;
1736 return v;
1737}
1738
1739static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1740{
1741 const struct port_info *p = netdev_priv(dev);
1742
1743 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
 1744 p->port_type == FW_PORT_TYPE_BT_XFI ||
1745 p->port_type == FW_PORT_TYPE_BT_XAUI)
1746 cmd->port = PORT_TP;
1747 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1748 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
 1749 cmd->port = PORT_FIBRE;
1750 else if (p->port_type == FW_PORT_TYPE_SFP) {
1751 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1752 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1753 cmd->port = PORT_DA;
1754 else
1755 cmd->port = PORT_FIBRE;
1756 } else
1757 cmd->port = PORT_OTHER;
1758
1759 if (p->mdio_addr >= 0) {
1760 cmd->phy_address = p->mdio_addr;
1761 cmd->transceiver = XCVR_EXTERNAL;
1762 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1763 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1764 } else {
1765 cmd->phy_address = 0; /* not really, but no better option */
1766 cmd->transceiver = XCVR_INTERNAL;
1767 cmd->mdio_support = 0;
1768 }
1769
1770 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1771 cmd->advertising = from_fw_linkcaps(p->port_type,
1772 p->link_cfg.advertising);
1773 ethtool_cmd_speed_set(cmd,
1774 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1775 cmd->duplex = DUPLEX_FULL;
1776 cmd->autoneg = p->link_cfg.autoneg;
1777 cmd->maxtxpkt = 0;
1778 cmd->maxrxpkt = 0;
1779 return 0;
1780}
1781
1782static unsigned int speed_to_caps(int speed)
1783{
1784 if (speed == SPEED_100)
1785 return FW_PORT_CAP_SPEED_100M;
1786 if (speed == SPEED_1000)
1787 return FW_PORT_CAP_SPEED_1G;
1788 if (speed == SPEED_10000)
1789 return FW_PORT_CAP_SPEED_10G;
1790 return 0;
1791}
1792
1793static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1794{
1795 unsigned int cap;
1796 struct port_info *p = netdev_priv(dev);
1797 struct link_config *lc = &p->link_cfg;
 1798 u32 speed = ethtool_cmd_speed(cmd);
1799
1800 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
1801 return -EINVAL;
1802
1803 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1804 /*
1805 * PHY offers a single speed. See if that's what's
1806 * being requested.
1807 */
1808 if (cmd->autoneg == AUTONEG_DISABLE &&
25db0338
DD
1809 (lc->supported & speed_to_caps(speed)))
1810 return 0;
b8ff05a9
DM
1811 return -EINVAL;
1812 }
1813
1814 if (cmd->autoneg == AUTONEG_DISABLE) {
25db0338 1815 cap = speed_to_caps(speed);
b8ff05a9 1816
25db0338
DD
1817 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1818 (speed == SPEED_10000))
b8ff05a9
DM
1819 return -EINVAL;
1820 lc->requested_speed = cap;
1821 lc->advertising = 0;
1822 } else {
1823 cap = to_fw_linkcaps(cmd->advertising);
1824 if (!(lc->supported & cap))
1825 return -EINVAL;
1826 lc->requested_speed = 0;
1827 lc->advertising = cap | FW_PORT_CAP_ANEG;
1828 }
1829 lc->autoneg = cmd->autoneg;
1830
1831 if (netif_running(dev))
060e0c75
DM
1832 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1833 lc);
b8ff05a9
DM
1834 return 0;
1835}
1836
1837static void get_pauseparam(struct net_device *dev,
1838 struct ethtool_pauseparam *epause)
1839{
1840 struct port_info *p = netdev_priv(dev);
1841
1842 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1843 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1844 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1845}
1846
1847static int set_pauseparam(struct net_device *dev,
1848 struct ethtool_pauseparam *epause)
1849{
1850 struct port_info *p = netdev_priv(dev);
1851 struct link_config *lc = &p->link_cfg;
1852
1853 if (epause->autoneg == AUTONEG_DISABLE)
1854 lc->requested_fc = 0;
1855 else if (lc->supported & FW_PORT_CAP_ANEG)
1856 lc->requested_fc = PAUSE_AUTONEG;
1857 else
1858 return -EINVAL;
1859
1860 if (epause->rx_pause)
1861 lc->requested_fc |= PAUSE_RX;
1862 if (epause->tx_pause)
1863 lc->requested_fc |= PAUSE_TX;
1864 if (netif_running(dev))
060e0c75
DM
1865 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1866 lc);
b8ff05a9
DM
1867 return 0;
1868}
1869
b8ff05a9
DM
1870static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1871{
1872 const struct port_info *pi = netdev_priv(dev);
1873 const struct sge *s = &pi->adapter->sge;
1874
1875 e->rx_max_pending = MAX_RX_BUFFERS;
1876 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1877 e->rx_jumbo_max_pending = 0;
1878 e->tx_max_pending = MAX_TXQ_ENTRIES;
1879
1880 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1881 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1882 e->rx_jumbo_pending = 0;
1883 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1884}
1885
1886static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1887{
1888 int i;
1889 const struct port_info *pi = netdev_priv(dev);
1890 struct adapter *adapter = pi->adapter;
1891 struct sge *s = &adapter->sge;
1892
1893 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1894 e->tx_pending > MAX_TXQ_ENTRIES ||
1895 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1896 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1897 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1898 return -EINVAL;
1899
1900 if (adapter->flags & FULL_INIT_DONE)
1901 return -EBUSY;
1902
1903 for (i = 0; i < pi->nqsets; ++i) {
1904 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1905 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1906 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1907 }
1908 return 0;
1909}
1910
1911static int closest_timer(const struct sge *s, int time)
1912{
1913 int i, delta, match = 0, min_delta = INT_MAX;
1914
1915 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1916 delta = time - s->timer_val[i];
1917 if (delta < 0)
1918 delta = -delta;
1919 if (delta < min_delta) {
1920 min_delta = delta;
1921 match = i;
1922 }
1923 }
1924 return match;
1925}
1926
1927static int closest_thres(const struct sge *s, int thres)
1928{
1929 int i, delta, match = 0, min_delta = INT_MAX;
1930
1931 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1932 delta = thres - s->counter_val[i];
1933 if (delta < 0)
1934 delta = -delta;
1935 if (delta < min_delta) {
1936 min_delta = delta;
1937 match = i;
1938 }
1939 }
1940 return match;
1941}
1942
1943/*
1944 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1945 */
1946static unsigned int qtimer_val(const struct adapter *adap,
1947 const struct sge_rspq *q)
1948{
1949 unsigned int idx = q->intr_params >> 1;
1950
1951 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1952}
1953
1954/**
1955 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1956 * @adap: the adapter
1957 * @q: the Rx queue
1958 * @us: the hold-off time in us, or 0 to disable timer
1959 * @cnt: the hold-off packet count, or 0 to disable counter
1960 *
1961 * Sets an Rx queue's interrupt hold-off time and packet count. At least
1962 * one of the two needs to be enabled for the queue to generate interrupts.
1963 */
1964static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1965 unsigned int us, unsigned int cnt)
1966{
1967 if ((us | cnt) == 0)
1968 cnt = 1;
1969
1970 if (cnt) {
1971 int err;
1972 u32 v, new_idx;
1973
1974 new_idx = closest_thres(&adap->sge, cnt);
1975 if (q->desc && q->pktcnt_idx != new_idx) {
1976 /* the queue has already been created, update it */
1977 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1978 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1979 FW_PARAMS_PARAM_YZ(q->cntxt_id);
060e0c75
DM
1980 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1981 &new_idx);
b8ff05a9
DM
1982 if (err)
1983 return err;
1984 }
1985 q->pktcnt_idx = new_idx;
1986 }
1987
1988 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1989 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1990 return 0;
1991}
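/*
 * Illustrative sketch, not part of the driver: how a caller would use the
 * helpers above.  The requested hold-off time and packet count are quantized
 * to the nearest entries of sge.timer_val[] / sge.counter_val[], so the value
 * programmed may differ from the one asked for.  The queue pointer and the
 * numbers below are assumptions for the example only.
 */
static void __maybe_unused example_rx_holdoff(struct adapter *adap,
					      struct sge_rspq *q)
{
	/* ask for roughly 50us of hold-off and up to 8 packets per interrupt */
	set_rxq_intr_params(adap, q, 50, 8);

	/* qtimer_val() reports the hold-off time that was actually chosen */
	pr_debug("rx hold-off now %u us\n", qtimer_val(adap, q));
}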
1992
1993static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1994{
1995 const struct port_info *pi = netdev_priv(dev);
1996 struct adapter *adap = pi->adapter;
1997
1998 return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1999 c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
2000}
2001
2002static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2003{
2004 const struct port_info *pi = netdev_priv(dev);
2005 const struct adapter *adap = pi->adapter;
2006 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2007
2008 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2009 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2010 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2011 return 0;
2012}
2013
1478b3ee
DM
2014/**
2015 * eeprom_ptov - translate a physical EEPROM address to virtual
2016 * @phys_addr: the physical EEPROM address
2017 * @fn: the PCI function number
2018 * @sz: size of function-specific area
2019 *
 2020 * Translate a physical EEPROM address to virtual. The first 1K is
 2021 * accessed through virtual addresses starting at 31K; the rest is
 2022 * accessed through virtual addresses starting at 0.
2023 *
2024 * The mapping is as follows:
2025 * [0..1K) -> [31K..32K)
2026 * [1K..1K+A) -> [31K-A..31K)
2027 * [1K+A..ES) -> [0..ES-A-1K)
2028 *
2029 * where A = @fn * @sz, and ES = EEPROM size.
b8ff05a9 2030 */
1478b3ee 2031static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
b8ff05a9 2032{
1478b3ee 2033 fn *= sz;
b8ff05a9
DM
2034 if (phys_addr < 1024)
2035 return phys_addr + (31 << 10);
1478b3ee
DM
2036 if (phys_addr < 1024 + fn)
2037 return 31744 - fn + phys_addr - 1024;
b8ff05a9 2038 if (phys_addr < EEPROMSIZE)
1478b3ee 2039 return phys_addr - 1024 - fn;
b8ff05a9
DM
2040 return -EINVAL;
2041}
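/*
 * Worked example of the mapping above (illustrative only, assuming a
 * function-specific area of 256 bytes, i.e. A = @fn * @sz = 256 for fn == 1):
 *
 *	eeprom_ptov(0x000, 1, 256) == 0x7c00	first 1K -> [31K..32K)
 *	eeprom_ptov(0x400, 1, 256) == 0x7b00	function area -> just below 31K
 *	eeprom_ptov(0x500, 1, 256) == 0x0000	remainder -> from 0 upwards
 */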
2042
2043/*
2044 * The next two routines implement eeprom read/write from physical addresses.
b8ff05a9
DM
2045 */
2046static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2047{
1478b3ee 2048 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2049
2050 if (vaddr >= 0)
2051 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2052 return vaddr < 0 ? vaddr : 0;
2053}
2054
2055static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2056{
1478b3ee 2057 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
b8ff05a9
DM
2058
2059 if (vaddr >= 0)
2060 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2061 return vaddr < 0 ? vaddr : 0;
2062}
2063
2064#define EEPROM_MAGIC 0x38E2F10C
2065
2066static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2067 u8 *data)
2068{
2069 int i, err = 0;
2070 struct adapter *adapter = netdev2adap(dev);
2071
2072 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2073 if (!buf)
2074 return -ENOMEM;
2075
2076 e->magic = EEPROM_MAGIC;
2077 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2078 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2079
2080 if (!err)
2081 memcpy(data, buf + e->offset, e->len);
2082 kfree(buf);
2083 return err;
2084}
2085
2086static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2087 u8 *data)
2088{
2089 u8 *buf;
2090 int err = 0;
2091 u32 aligned_offset, aligned_len, *p;
2092 struct adapter *adapter = netdev2adap(dev);
2093
2094 if (eeprom->magic != EEPROM_MAGIC)
2095 return -EINVAL;
2096
2097 aligned_offset = eeprom->offset & ~3;
2098 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2099
1478b3ee
DM
2100 if (adapter->fn > 0) {
2101 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2102
2103 if (aligned_offset < start ||
2104 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2105 return -EPERM;
2106 }
2107
b8ff05a9
DM
2108 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2109 /*
2110 * RMW possibly needed for first or last words.
2111 */
2112 buf = kmalloc(aligned_len, GFP_KERNEL);
2113 if (!buf)
2114 return -ENOMEM;
2115 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2116 if (!err && aligned_len > 4)
2117 err = eeprom_rd_phys(adapter,
2118 aligned_offset + aligned_len - 4,
2119 (u32 *)&buf[aligned_len - 4]);
2120 if (err)
2121 goto out;
2122 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2123 } else
2124 buf = data;
2125
2126 err = t4_seeprom_wp(adapter, false);
2127 if (err)
2128 goto out;
2129
2130 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2131 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2132 aligned_offset += 4;
2133 }
2134
2135 if (!err)
2136 err = t4_seeprom_wp(adapter, true);
2137out:
2138 if (buf != data)
2139 kfree(buf);
2140 return err;
2141}
2142
2143static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2144{
2145 int ret;
2146 const struct firmware *fw;
2147 struct adapter *adap = netdev2adap(netdev);
2148
2149 ef->data[sizeof(ef->data) - 1] = '\0';
2150 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2151 if (ret < 0)
2152 return ret;
2153
2154 ret = t4_load_fw(adap, fw->data, fw->size);
2155 release_firmware(fw);
2156 if (!ret)
2157 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2158 return ret;
2159}
2160
2161#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2162#define BCAST_CRC 0xa0ccc1a6
2163
2164static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2165{
 2166 wol->supported = WOL_SUPPORTED;
2167 wol->wolopts = netdev2adap(dev)->wol;
2168 memset(&wol->sopass, 0, sizeof(wol->sopass));
2169}
2170
2171static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2172{
2173 int err = 0;
2174 struct port_info *pi = netdev_priv(dev);
2175
2176 if (wol->wolopts & ~WOL_SUPPORTED)
2177 return -EINVAL;
2178 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2179 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2180 if (wol->wolopts & WAKE_BCAST) {
2181 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2182 ~0ULL, 0, false);
2183 if (!err)
2184 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2185 ~6ULL, ~0ULL, BCAST_CRC, true);
2186 } else
2187 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2188 return err;
2189}
2190
c8f44aff 2191static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
87b6cf51 2192{
2ed28baa 2193 const struct port_info *pi = netdev_priv(dev);
c8f44aff 2194 netdev_features_t changed = dev->features ^ features;
19ecae2c 2195 int err;
19ecae2c 2196
2ed28baa
MM
2197 if (!(changed & NETIF_F_HW_VLAN_RX))
2198 return 0;
19ecae2c 2199
2ed28baa
MM
2200 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2201 -1, -1, -1,
2202 !!(features & NETIF_F_HW_VLAN_RX), true);
2203 if (unlikely(err))
2204 dev->features = features ^ NETIF_F_HW_VLAN_RX;
19ecae2c 2205 return err;
87b6cf51
DM
2206}
2207
7850f63f 2208static u32 get_rss_table_size(struct net_device *dev)
671b0060
DM
2209{
2210 const struct port_info *pi = netdev_priv(dev);
671b0060 2211
7850f63f
BH
2212 return pi->rss_size;
2213}
2214
2215static int get_rss_table(struct net_device *dev, u32 *p)
2216{
2217 const struct port_info *pi = netdev_priv(dev);
2218 unsigned int n = pi->rss_size;
2219
671b0060 2220 while (n--)
7850f63f 2221 p[n] = pi->rss[n];
671b0060
DM
2222 return 0;
2223}
2224
7850f63f 2225static int set_rss_table(struct net_device *dev, const u32 *p)
671b0060
DM
2226{
2227 unsigned int i;
2228 struct port_info *pi = netdev_priv(dev);
2229
7850f63f
BH
2230 for (i = 0; i < pi->rss_size; i++)
2231 pi->rss[i] = p[i];
671b0060
DM
2232 if (pi->adapter->flags & FULL_INIT_DONE)
2233 return write_rss(pi, pi->rss);
2234 return 0;
2235}
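/*
 * Illustrative sketch, not part of the driver: the shape of an evenly spread
 * indirection table such as the one userspace ethtool installs by default.
 * The table buffer is assumed to hold pi->rss_size entries, matching what
 * set_rss_table() above expects.
 */
static void __maybe_unused example_even_rss_table(const struct port_info *pi,
						  u32 *table)
{
	unsigned int i;

	/* entry i steers RSS bucket i to Rx queue set (i % nqsets) of the port */
	for (i = 0; i < pi->rss_size; i++)
		table[i] = i % pi->nqsets;
}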
2236
2237static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 2238 u32 *rules)
671b0060 2239{
f796564a
DM
2240 const struct port_info *pi = netdev_priv(dev);
2241
671b0060 2242 switch (info->cmd) {
f796564a
DM
2243 case ETHTOOL_GRXFH: {
2244 unsigned int v = pi->rss_mode;
2245
2246 info->data = 0;
2247 switch (info->flow_type) {
2248 case TCP_V4_FLOW:
2249 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2250 info->data = RXH_IP_SRC | RXH_IP_DST |
2251 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2252 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2253 info->data = RXH_IP_SRC | RXH_IP_DST;
2254 break;
2255 case UDP_V4_FLOW:
2256 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2257 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2258 info->data = RXH_IP_SRC | RXH_IP_DST |
2259 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2260 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2261 info->data = RXH_IP_SRC | RXH_IP_DST;
2262 break;
2263 case SCTP_V4_FLOW:
2264 case AH_ESP_V4_FLOW:
2265 case IPV4_FLOW:
2266 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2267 info->data = RXH_IP_SRC | RXH_IP_DST;
2268 break;
2269 case TCP_V6_FLOW:
2270 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2271 info->data = RXH_IP_SRC | RXH_IP_DST |
2272 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2273 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2274 info->data = RXH_IP_SRC | RXH_IP_DST;
2275 break;
2276 case UDP_V6_FLOW:
2277 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2278 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2279 info->data = RXH_IP_SRC | RXH_IP_DST |
2280 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2281 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2282 info->data = RXH_IP_SRC | RXH_IP_DST;
2283 break;
2284 case SCTP_V6_FLOW:
2285 case AH_ESP_V6_FLOW:
2286 case IPV6_FLOW:
2287 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2288 info->data = RXH_IP_SRC | RXH_IP_DST;
2289 break;
2290 }
2291 return 0;
2292 }
671b0060 2293 case ETHTOOL_GRXRINGS:
f796564a 2294 info->data = pi->nqsets;
671b0060
DM
2295 return 0;
2296 }
2297 return -EOPNOTSUPP;
2298}
2299
9b07be4b 2300static const struct ethtool_ops cxgb_ethtool_ops = {
b8ff05a9
DM
2301 .get_settings = get_settings,
2302 .set_settings = set_settings,
2303 .get_drvinfo = get_drvinfo,
2304 .get_msglevel = get_msglevel,
2305 .set_msglevel = set_msglevel,
2306 .get_ringparam = get_sge_param,
2307 .set_ringparam = set_sge_param,
2308 .get_coalesce = get_coalesce,
2309 .set_coalesce = set_coalesce,
2310 .get_eeprom_len = get_eeprom_len,
2311 .get_eeprom = get_eeprom,
2312 .set_eeprom = set_eeprom,
2313 .get_pauseparam = get_pauseparam,
2314 .set_pauseparam = set_pauseparam,
b8ff05a9
DM
2315 .get_link = ethtool_op_get_link,
2316 .get_strings = get_strings,
c5e06360 2317 .set_phys_id = identify_port,
b8ff05a9
DM
2318 .nway_reset = restart_autoneg,
2319 .get_sset_count = get_sset_count,
2320 .get_ethtool_stats = get_stats,
2321 .get_regs_len = get_regs_len,
2322 .get_regs = get_regs,
2323 .get_wol = get_wol,
2324 .set_wol = set_wol,
671b0060 2325 .get_rxnfc = get_rxnfc,
7850f63f 2326 .get_rxfh_indir_size = get_rss_table_size,
671b0060
DM
2327 .get_rxfh_indir = get_rss_table,
2328 .set_rxfh_indir = set_rss_table,
b8ff05a9
DM
2329 .flash_device = set_flash,
2330};
2331
2332/*
2333 * debugfs support
2334 */
b8ff05a9
DM
2335static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2336 loff_t *ppos)
2337{
2338 loff_t pos = *ppos;
2339 loff_t avail = file->f_path.dentry->d_inode->i_size;
2340 unsigned int mem = (uintptr_t)file->private_data & 3;
2341 struct adapter *adap = file->private_data - mem;
2342
2343 if (pos < 0)
2344 return -EINVAL;
2345 if (pos >= avail)
2346 return 0;
2347 if (count > avail - pos)
2348 count = avail - pos;
2349
2350 while (count) {
2351 size_t len;
2352 int ret, ofst;
2353 __be32 data[16];
2354
2355 if (mem == MEM_MC)
2356 ret = t4_mc_read(adap, pos, data, NULL);
2357 else
2358 ret = t4_edc_read(adap, mem, pos, data, NULL);
2359 if (ret)
2360 return ret;
2361
2362 ofst = pos % sizeof(data);
2363 len = min(count, sizeof(data) - ofst);
2364 if (copy_to_user(buf, (u8 *)data + ofst, len))
2365 return -EFAULT;
2366
2367 buf += len;
2368 pos += len;
2369 count -= len;
2370 }
2371 count = pos - *ppos;
2372 *ppos = pos;
2373 return count;
2374}
2375
2376static const struct file_operations mem_debugfs_fops = {
2377 .owner = THIS_MODULE,
234e3405 2378 .open = simple_open,
b8ff05a9 2379 .read = mem_read,
6038f373 2380 .llseek = default_llseek,
b8ff05a9
DM
2381};
2382
91744948 2383static void add_debugfs_mem(struct adapter *adap, const char *name,
1dd06ae8 2384 unsigned int idx, unsigned int size_mb)
b8ff05a9
DM
2385{
2386 struct dentry *de;
2387
2388 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2389 (void *)adap + idx, &mem_debugfs_fops);
2390 if (de && de->d_inode)
2391 de->d_inode->i_size = size_mb << 20;
2392}
2393
91744948 2394static int setup_debugfs(struct adapter *adap)
b8ff05a9
DM
2395{
2396 int i;
2397
2398 if (IS_ERR_OR_NULL(adap->debugfs_root))
2399 return -1;
2400
2401 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2402 if (i & EDRAM0_ENABLE)
2403 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2404 if (i & EDRAM1_ENABLE)
2405 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2406 if (i & EXT_MEM_ENABLE)
2407 add_debugfs_mem(adap, "mc", MEM_MC,
2408 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2409 if (adap->l2t)
2410 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2411 &t4_l2t_fops);
2412 return 0;
2413}
2414
2415/*
2416 * upper-layer driver support
2417 */
2418
2419/*
2420 * Allocate an active-open TID and set it to the supplied value.
2421 */
2422int cxgb4_alloc_atid(struct tid_info *t, void *data)
2423{
2424 int atid = -1;
2425
2426 spin_lock_bh(&t->atid_lock);
2427 if (t->afree) {
2428 union aopen_entry *p = t->afree;
2429
f2b7e78d 2430 atid = (p - t->atid_tab) + t->atid_base;
b8ff05a9
DM
2431 t->afree = p->next;
2432 p->data = data;
2433 t->atids_in_use++;
2434 }
2435 spin_unlock_bh(&t->atid_lock);
2436 return atid;
2437}
2438EXPORT_SYMBOL(cxgb4_alloc_atid);
2439
2440/*
2441 * Release an active-open TID.
2442 */
2443void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2444{
f2b7e78d 2445 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
b8ff05a9
DM
2446
2447 spin_lock_bh(&t->atid_lock);
2448 p->next = t->afree;
2449 t->afree = p;
2450 t->atids_in_use--;
2451 spin_unlock_bh(&t->atid_lock);
2452}
2453EXPORT_SYMBOL(cxgb4_free_atid);
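/*
 * Illustrative sketch, not part of the driver: the usual life cycle of an
 * active-open TID as seen from an upper-layer driver.  The per-connection
 * state pointer "my_conn" is an assumption for the example only.
 */
static void __maybe_unused example_atid_usage(struct tid_info *t, void *my_conn)
{
	int atid = cxgb4_alloc_atid(t, my_conn);

	if (atid < 0)
		return;			/* free list exhausted */

	/* ... use atid in the active-open work request, run the connection ... */

	cxgb4_free_atid(t, atid);	/* back onto the free list */
}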
2454
2455/*
2456 * Allocate a server TID and set it to the supplied value.
2457 */
2458int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2459{
2460 int stid;
2461
2462 spin_lock_bh(&t->stid_lock);
2463 if (family == PF_INET) {
2464 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2465 if (stid < t->nstids)
2466 __set_bit(stid, t->stid_bmap);
2467 else
2468 stid = -1;
2469 } else {
2470 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2471 if (stid < 0)
2472 stid = -1;
2473 }
2474 if (stid >= 0) {
2475 t->stid_tab[stid].data = data;
2476 stid += t->stid_base;
2477 t->stids_in_use++;
2478 }
2479 spin_unlock_bh(&t->stid_lock);
2480 return stid;
2481}
2482EXPORT_SYMBOL(cxgb4_alloc_stid);
2483
dca4faeb
VP
2484/* Allocate a server filter TID and set it to the supplied value.
2485 */
2486int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2487{
2488 int stid;
2489
2490 spin_lock_bh(&t->stid_lock);
2491 if (family == PF_INET) {
2492 stid = find_next_zero_bit(t->stid_bmap,
2493 t->nstids + t->nsftids, t->nstids);
2494 if (stid < (t->nstids + t->nsftids))
2495 __set_bit(stid, t->stid_bmap);
2496 else
2497 stid = -1;
2498 } else {
2499 stid = -1;
2500 }
2501 if (stid >= 0) {
2502 t->stid_tab[stid].data = data;
2503 stid += t->stid_base;
2504 t->stids_in_use++;
2505 }
2506 spin_unlock_bh(&t->stid_lock);
2507 return stid;
2508}
2509EXPORT_SYMBOL(cxgb4_alloc_sftid);
2510
2511/* Release a server TID.
b8ff05a9
DM
2512 */
2513void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2514{
2515 stid -= t->stid_base;
2516 spin_lock_bh(&t->stid_lock);
2517 if (family == PF_INET)
2518 __clear_bit(stid, t->stid_bmap);
2519 else
2520 bitmap_release_region(t->stid_bmap, stid, 2);
2521 t->stid_tab[stid].data = NULL;
2522 t->stids_in_use--;
2523 spin_unlock_bh(&t->stid_lock);
2524}
2525EXPORT_SYMBOL(cxgb4_free_stid);
2526
2527/*
2528 * Populate a TID_RELEASE WR. Caller must properly size the skb.
2529 */
2530static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2531 unsigned int tid)
2532{
2533 struct cpl_tid_release *req;
2534
2535 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2536 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2537 INIT_TP_WR(req, tid);
2538 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2539}
2540
2541/*
2542 * Queue a TID release request and if necessary schedule a work queue to
2543 * process it.
2544 */
31b9c19b 2545static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2546 unsigned int tid)
b8ff05a9
DM
2547{
2548 void **p = &t->tid_tab[tid];
2549 struct adapter *adap = container_of(t, struct adapter, tids);
2550
2551 spin_lock_bh(&adap->tid_release_lock);
2552 *p = adap->tid_release_head;
2553 /* Low 2 bits encode the Tx channel number */
2554 adap->tid_release_head = (void **)((uintptr_t)p | chan);
2555 if (!adap->tid_release_task_busy) {
2556 adap->tid_release_task_busy = true;
3069ee9b 2557 queue_work(workq, &adap->tid_release_task);
b8ff05a9
DM
2558 }
2559 spin_unlock_bh(&adap->tid_release_lock);
2560}
b8ff05a9
DM
2561
2562/*
2563 * Process the list of pending TID release requests.
2564 */
2565static void process_tid_release_list(struct work_struct *work)
2566{
2567 struct sk_buff *skb;
2568 struct adapter *adap;
2569
2570 adap = container_of(work, struct adapter, tid_release_task);
2571
2572 spin_lock_bh(&adap->tid_release_lock);
2573 while (adap->tid_release_head) {
2574 void **p = adap->tid_release_head;
2575 unsigned int chan = (uintptr_t)p & 3;
2576 p = (void *)p - chan;
2577
2578 adap->tid_release_head = *p;
2579 *p = NULL;
2580 spin_unlock_bh(&adap->tid_release_lock);
2581
2582 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2583 GFP_KERNEL)))
2584 schedule_timeout_uninterruptible(1);
2585
2586 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2587 t4_ofld_send(adap, skb);
2588 spin_lock_bh(&adap->tid_release_lock);
2589 }
2590 adap->tid_release_task_busy = false;
2591 spin_unlock_bh(&adap->tid_release_lock);
2592}
2593
2594/*
 2595 * Release a TID and inform HW. If we are unable to allocate the release
 2596 * message, we defer the release to a work queue.
2597 */
2598void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2599{
2600 void *old;
2601 struct sk_buff *skb;
2602 struct adapter *adap = container_of(t, struct adapter, tids);
2603
2604 old = t->tid_tab[tid];
2605 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2606 if (likely(skb)) {
2607 t->tid_tab[tid] = NULL;
2608 mk_tid_release(skb, chan, tid);
2609 t4_ofld_send(adap, skb);
2610 } else
2611 cxgb4_queue_tid_release(t, chan, tid);
2612 if (old)
2613 atomic_dec(&t->tids_in_use);
2614}
2615EXPORT_SYMBOL(cxgb4_remove_tid);
2616
2617/*
2618 * Allocate and initialize the TID tables. Returns 0 on success.
2619 */
2620static int tid_init(struct tid_info *t)
2621{
2622 size_t size;
f2b7e78d 2623 unsigned int stid_bmap_size;
b8ff05a9
DM
2624 unsigned int natids = t->natids;
2625
dca4faeb 2626 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
f2b7e78d
VP
2627 size = t->ntids * sizeof(*t->tid_tab) +
2628 natids * sizeof(*t->atid_tab) +
b8ff05a9 2629 t->nstids * sizeof(*t->stid_tab) +
dca4faeb 2630 t->nsftids * sizeof(*t->stid_tab) +
f2b7e78d 2631 stid_bmap_size * sizeof(long) +
dca4faeb
VP
2632 t->nftids * sizeof(*t->ftid_tab) +
2633 t->nsftids * sizeof(*t->ftid_tab);
f2b7e78d 2634
b8ff05a9
DM
2635 t->tid_tab = t4_alloc_mem(size);
2636 if (!t->tid_tab)
2637 return -ENOMEM;
2638
2639 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2640 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 2641 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
f2b7e78d 2642 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
b8ff05a9
DM
2643 spin_lock_init(&t->stid_lock);
2644 spin_lock_init(&t->atid_lock);
2645
2646 t->stids_in_use = 0;
2647 t->afree = NULL;
2648 t->atids_in_use = 0;
2649 atomic_set(&t->tids_in_use, 0);
2650
2651 /* Setup the free list for atid_tab and clear the stid bitmap. */
2652 if (natids) {
2653 while (--natids)
2654 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2655 t->afree = t->atid_tab;
2656 }
dca4faeb 2657 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
b8ff05a9
DM
2658 return 0;
2659}
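/*
 * For reference, the single t4_alloc_mem() block set up by tid_init() above
 * is carved into consecutive regions, which is why each table pointer is
 * derived from the previous one:
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[]    | ftid_tab[nftids + nsftids]
 */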
2660
2661/**
2662 * cxgb4_create_server - create an IP server
2663 * @dev: the device
2664 * @stid: the server TID
2665 * @sip: local IP address to bind server to
2666 * @sport: the server's TCP port
2667 * @queue: queue to direct messages from this server to
2668 *
2669 * Create an IP server for the given port and address.
2670 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2671 */
2672int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2673 __be32 sip, __be16 sport, unsigned int queue)
2674{
2675 unsigned int chan;
2676 struct sk_buff *skb;
2677 struct adapter *adap;
2678 struct cpl_pass_open_req *req;
2679
2680 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2681 if (!skb)
2682 return -ENOMEM;
2683
2684 adap = netdev2adap(dev);
2685 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2686 INIT_TP_WR(req, 0);
2687 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2688 req->local_port = sport;
2689 req->peer_port = htons(0);
2690 req->local_ip = sip;
2691 req->peer_ip = htonl(0);
e46dab4d 2692 chan = rxq_to_chan(&adap->sge, queue);
b8ff05a9
DM
2693 req->opt0 = cpu_to_be64(TX_CHAN(chan));
2694 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2695 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2696 return t4_mgmt_tx(adap, skb);
2697}
2698EXPORT_SYMBOL(cxgb4_create_server);
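/*
 * Illustrative sketch, not part of the driver: how an upper-layer driver
 * might set up an offloaded IPv4 listener with the helpers above.  The Rx
 * queue number and callback data are assumptions for the example only, and
 * the error unwinding (cxgb4_free_stid()) is omitted for brevity.
 */
static int __maybe_unused example_create_listener(struct net_device *dev,
						  __be32 sip, __be16 sport,
						  unsigned int rxq, void *data)
{
	struct adapter *adap = netdev2adap(dev);
	int stid = cxgb4_alloc_stid(&adap->tids, PF_INET, data);

	if (stid < 0)
		return -ENOMEM;

	/* returns <0 on error or a NET_XMIT_* value on success */
	return cxgb4_create_server(dev, stid, sip, sport, rxq);
}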
2699
b8ff05a9
DM
2700/**
2701 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2702 * @mtus: the HW MTU table
2703 * @mtu: the target MTU
2704 * @idx: index of selected entry in the MTU table
2705 *
2706 * Returns the index and the value in the HW MTU table that is closest to
2707 * but does not exceed @mtu, unless @mtu is smaller than any value in the
2708 * table, in which case that smallest available value is selected.
2709 */
2710unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2711 unsigned int *idx)
2712{
2713 unsigned int i = 0;
2714
2715 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2716 ++i;
2717 if (idx)
2718 *idx = i;
2719 return mtus[i];
2720}
2721EXPORT_SYMBOL(cxgb4_best_mtu);
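/*
 * Illustrative sketch, not part of the driver: picking an MTU-table entry
 * for a peer.  With a table containing ... 1500, 2002 ... a request for 1600
 * selects 1500, since cxgb4_best_mtu() never exceeds the target unless the
 * target is below the smallest entry.  The values are assumptions for the
 * example only.
 */
static void __maybe_unused example_pick_mtu(struct adapter *adap,
					    unsigned short peer_mtu)
{
	unsigned int idx;
	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, peer_mtu, &idx);

	pr_debug("using MTU %u (table index %u)\n", mtu, idx);
}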
2722
2723/**
2724 * cxgb4_port_chan - get the HW channel of a port
2725 * @dev: the net device for the port
2726 *
2727 * Return the HW Tx channel of the given port.
2728 */
2729unsigned int cxgb4_port_chan(const struct net_device *dev)
2730{
2731 return netdev2pinfo(dev)->tx_chan;
2732}
2733EXPORT_SYMBOL(cxgb4_port_chan);
2734
881806bc
VP
2735unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2736{
2737 struct adapter *adap = netdev2adap(dev);
2738 u32 v;
2739
2740 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2741 return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
2742}
2743EXPORT_SYMBOL(cxgb4_dbfifo_count);
2744
b8ff05a9
DM
2745/**
2746 * cxgb4_port_viid - get the VI id of a port
2747 * @dev: the net device for the port
2748 *
2749 * Return the VI id of the given port.
2750 */
2751unsigned int cxgb4_port_viid(const struct net_device *dev)
2752{
2753 return netdev2pinfo(dev)->viid;
2754}
2755EXPORT_SYMBOL(cxgb4_port_viid);
2756
2757/**
2758 * cxgb4_port_idx - get the index of a port
2759 * @dev: the net device for the port
2760 *
2761 * Return the index of the given port.
2762 */
2763unsigned int cxgb4_port_idx(const struct net_device *dev)
2764{
2765 return netdev2pinfo(dev)->port_id;
2766}
2767EXPORT_SYMBOL(cxgb4_port_idx);
2768
b8ff05a9
DM
2769void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2770 struct tp_tcp_stats *v6)
2771{
2772 struct adapter *adap = pci_get_drvdata(pdev);
2773
2774 spin_lock(&adap->stats_lock);
2775 t4_tp_get_tcp_stats(adap, v4, v6);
2776 spin_unlock(&adap->stats_lock);
2777}
2778EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2779
2780void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2781 const unsigned int *pgsz_order)
2782{
2783 struct adapter *adap = netdev2adap(dev);
2784
2785 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2786 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2787 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2788 HPZ3(pgsz_order[3]));
2789}
2790EXPORT_SYMBOL(cxgb4_iscsi_init);
2791
3069ee9b
VP
2792int cxgb4_flush_eq_cache(struct net_device *dev)
2793{
2794 struct adapter *adap = netdev2adap(dev);
2795 int ret;
2796
2797 ret = t4_fwaddrspace_write(adap, adap->mbox,
2798 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
2799 return ret;
2800}
2801EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2802
2803static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2804{
2805 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
2806 __be64 indices;
2807 int ret;
2808
2809 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
2810 if (!ret) {
404d9e3f
VP
2811 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2812 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3069ee9b
VP
2813 }
2814 return ret;
2815}
2816
2817int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2818 u16 size)
2819{
2820 struct adapter *adap = netdev2adap(dev);
2821 u16 hw_pidx, hw_cidx;
2822 int ret;
2823
2824 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2825 if (ret)
2826 goto out;
2827
2828 if (pidx != hw_pidx) {
2829 u16 delta;
2830
2831 if (pidx >= hw_pidx)
2832 delta = pidx - hw_pidx;
2833 else
2834 delta = size - hw_pidx + pidx;
2835 wmb();
840f3000
VP
2836 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2837 QID(qid) | PIDX(delta));
3069ee9b
VP
2838 }
2839out:
2840 return ret;
2841}
2842EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
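/*
 * Worked example for the producer-index resync above (illustrative only):
 * with a queue of size 1024, a software pidx of 3 and a hardware pidx of
 * 1021, the doorbell must be rung for size - hw_pidx + pidx =
 * 1024 - 1021 + 3 = 6 descriptors, i.e. exactly the writes that wrapped
 * around the end of the ring while doorbells were being dropped.
 */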
2843
b8ff05a9
DM
2844static struct pci_driver cxgb4_driver;
2845
2846static void check_neigh_update(struct neighbour *neigh)
2847{
2848 const struct device *parent;
2849 const struct net_device *netdev = neigh->dev;
2850
2851 if (netdev->priv_flags & IFF_802_1Q_VLAN)
2852 netdev = vlan_dev_real_dev(netdev);
2853 parent = netdev->dev.parent;
2854 if (parent && parent->driver == &cxgb4_driver.driver)
2855 t4_l2t_update(dev_get_drvdata(parent), neigh);
2856}
2857
2858static int netevent_cb(struct notifier_block *nb, unsigned long event,
2859 void *data)
2860{
2861 switch (event) {
2862 case NETEVENT_NEIGH_UPDATE:
2863 check_neigh_update(data);
2864 break;
b8ff05a9
DM
2865 case NETEVENT_REDIRECT:
2866 default:
2867 break;
2868 }
2869 return 0;
2870}
2871
2872static bool netevent_registered;
2873static struct notifier_block cxgb4_netevent_nb = {
2874 .notifier_call = netevent_cb
2875};
2876
3069ee9b
VP
2877static void drain_db_fifo(struct adapter *adap, int usecs)
2878{
2879 u32 v;
2880
2881 do {
2882 set_current_state(TASK_UNINTERRUPTIBLE);
2883 schedule_timeout(usecs_to_jiffies(usecs));
2884 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2885 if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
2886 break;
2887 } while (1);
2888}
2889
2890static void disable_txq_db(struct sge_txq *q)
2891{
2892 spin_lock_irq(&q->db_lock);
2893 q->db_disabled = 1;
2894 spin_unlock_irq(&q->db_lock);
2895}
2896
2897static void enable_txq_db(struct sge_txq *q)
2898{
2899 spin_lock_irq(&q->db_lock);
2900 q->db_disabled = 0;
2901 spin_unlock_irq(&q->db_lock);
2902}
2903
2904static void disable_dbs(struct adapter *adap)
2905{
2906 int i;
2907
2908 for_each_ethrxq(&adap->sge, i)
2909 disable_txq_db(&adap->sge.ethtxq[i].q);
2910 for_each_ofldrxq(&adap->sge, i)
2911 disable_txq_db(&adap->sge.ofldtxq[i].q);
2912 for_each_port(adap, i)
2913 disable_txq_db(&adap->sge.ctrlq[i].q);
2914}
2915
2916static void enable_dbs(struct adapter *adap)
2917{
2918 int i;
2919
2920 for_each_ethrxq(&adap->sge, i)
2921 enable_txq_db(&adap->sge.ethtxq[i].q);
2922 for_each_ofldrxq(&adap->sge, i)
2923 enable_txq_db(&adap->sge.ofldtxq[i].q);
2924 for_each_port(adap, i)
2925 enable_txq_db(&adap->sge.ctrlq[i].q);
2926}
2927
2928static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2929{
2930 u16 hw_pidx, hw_cidx;
2931 int ret;
2932
2933 spin_lock_bh(&q->db_lock);
2934 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2935 if (ret)
2936 goto out;
2937 if (q->db_pidx != hw_pidx) {
2938 u16 delta;
2939
2940 if (q->db_pidx >= hw_pidx)
2941 delta = q->db_pidx - hw_pidx;
2942 else
2943 delta = q->size - hw_pidx + q->db_pidx;
2944 wmb();
840f3000
VP
2945 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2946 QID(q->cntxt_id) | PIDX(delta));
3069ee9b
VP
2947 }
2948out:
2949 q->db_disabled = 0;
2950 spin_unlock_bh(&q->db_lock);
2951 if (ret)
2952 CH_WARN(adap, "DB drop recovery failed.\n");
 2953}

 2954static void recover_all_queues(struct adapter *adap)
2955{
2956 int i;
2957
2958 for_each_ethrxq(&adap->sge, i)
2959 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2960 for_each_ofldrxq(&adap->sge, i)
2961 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2962 for_each_port(adap, i)
2963 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2964}
2965
881806bc
VP
2966static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2967{
2968 mutex_lock(&uld_mutex);
2969 if (adap->uld_handle[CXGB4_ULD_RDMA])
2970 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
2971 cmd);
2972 mutex_unlock(&uld_mutex);
2973}
2974
2975static void process_db_full(struct work_struct *work)
2976{
2977 struct adapter *adap;
881806bc
VP
2978
2979 adap = container_of(work, struct adapter, db_full_task);
2980
881806bc 2981 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3069ee9b 2982 drain_db_fifo(adap, dbfifo_drain_delay);
840f3000
VP
2983 t4_set_reg_field(adap, SGE_INT_ENABLE3,
2984 DBFIFO_HP_INT | DBFIFO_LP_INT,
2985 DBFIFO_HP_INT | DBFIFO_LP_INT);
881806bc 2986 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
881806bc
VP
2987}
2988
2989static void process_db_drop(struct work_struct *work)
2990{
2991 struct adapter *adap;
881806bc 2992
3069ee9b 2993 adap = container_of(work, struct adapter, db_drop_task);
881806bc 2994
3069ee9b
VP
2995 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
2996 disable_dbs(adap);
881806bc 2997 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3069ee9b
VP
2998 drain_db_fifo(adap, 1);
2999 recover_all_queues(adap);
3000 enable_dbs(adap);
881806bc
VP
3001}
3002
3003void t4_db_full(struct adapter *adap)
3004{
840f3000
VP
3005 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3006 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3069ee9b 3007 queue_work(workq, &adap->db_full_task);
881806bc
VP
3008}
3009
3010void t4_db_dropped(struct adapter *adap)
3011{
3069ee9b 3012 queue_work(workq, &adap->db_drop_task);
881806bc
VP
3013}
3014
b8ff05a9
DM
3015static void uld_attach(struct adapter *adap, unsigned int uld)
3016{
3017 void *handle;
3018 struct cxgb4_lld_info lli;
dca4faeb 3019 unsigned short i;
b8ff05a9
DM
3020
3021 lli.pdev = adap->pdev;
3022 lli.l2t = adap->l2t;
3023 lli.tids = &adap->tids;
3024 lli.ports = adap->port;
3025 lli.vr = &adap->vres;
3026 lli.mtus = adap->params.mtus;
3027 if (uld == CXGB4_ULD_RDMA) {
3028 lli.rxq_ids = adap->sge.rdma_rxq;
3029 lli.nrxq = adap->sge.rdmaqs;
3030 } else if (uld == CXGB4_ULD_ISCSI) {
3031 lli.rxq_ids = adap->sge.ofld_rxq;
3032 lli.nrxq = adap->sge.ofldqsets;
3033 }
3034 lli.ntxq = adap->sge.ofldqsets;
3035 lli.nchan = adap->params.nports;
3036 lli.nports = adap->params.nports;
3037 lli.wr_cred = adap->params.ofldq_wr_cred;
3038 lli.adapter_type = adap->params.rev;
3039 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3040 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
3041 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3042 (adap->fn * 4));
b8ff05a9 3043 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
060e0c75
DM
3044 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3045 (adap->fn * 4));
dca4faeb
VP
3046 lli.filt_mode = tp_vlan_pri_map;
3047 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3048 for (i = 0; i < NCHAN; i++)
3049 lli.tx_modq[i] = i;
b8ff05a9
DM
3050 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3051 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3052 lli.fw_vers = adap->params.fw_vers;
3069ee9b 3053 lli.dbfifo_int_thresh = dbfifo_int_thresh;
dca4faeb
VP
3054 lli.sge_pktshift = adap->sge.pktshift;
3055 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
b8ff05a9
DM
3056
3057 handle = ulds[uld].add(&lli);
3058 if (IS_ERR(handle)) {
3059 dev_warn(adap->pdev_dev,
3060 "could not attach to the %s driver, error %ld\n",
3061 uld_str[uld], PTR_ERR(handle));
3062 return;
3063 }
3064
3065 adap->uld_handle[uld] = handle;
3066
3067 if (!netevent_registered) {
3068 register_netevent_notifier(&cxgb4_netevent_nb);
3069 netevent_registered = true;
3070 }
e29f5dbc
DM
3071
3072 if (adap->flags & FULL_INIT_DONE)
3073 ulds[uld].state_change(handle, CXGB4_STATE_UP);
b8ff05a9
DM
3074}
3075
3076static void attach_ulds(struct adapter *adap)
3077{
3078 unsigned int i;
3079
3080 mutex_lock(&uld_mutex);
3081 list_add_tail(&adap->list_node, &adapter_list);
3082 for (i = 0; i < CXGB4_ULD_MAX; i++)
3083 if (ulds[i].add)
3084 uld_attach(adap, i);
3085 mutex_unlock(&uld_mutex);
3086}
3087
3088static void detach_ulds(struct adapter *adap)
3089{
3090 unsigned int i;
3091
3092 mutex_lock(&uld_mutex);
3093 list_del(&adap->list_node);
3094 for (i = 0; i < CXGB4_ULD_MAX; i++)
3095 if (adap->uld_handle[i]) {
3096 ulds[i].state_change(adap->uld_handle[i],
3097 CXGB4_STATE_DETACH);
3098 adap->uld_handle[i] = NULL;
3099 }
3100 if (netevent_registered && list_empty(&adapter_list)) {
3101 unregister_netevent_notifier(&cxgb4_netevent_nb);
3102 netevent_registered = false;
3103 }
3104 mutex_unlock(&uld_mutex);
3105}
3106
3107static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3108{
3109 unsigned int i;
3110
3111 mutex_lock(&uld_mutex);
3112 for (i = 0; i < CXGB4_ULD_MAX; i++)
3113 if (adap->uld_handle[i])
3114 ulds[i].state_change(adap->uld_handle[i], new_state);
3115 mutex_unlock(&uld_mutex);
3116}
3117
3118/**
3119 * cxgb4_register_uld - register an upper-layer driver
3120 * @type: the ULD type
3121 * @p: the ULD methods
3122 *
3123 * Registers an upper-layer driver with this driver and notifies the ULD
3124 * about any presently available devices that support its type. Returns
3125 * %-EBUSY if a ULD of the same type is already registered.
3126 */
3127int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3128{
3129 int ret = 0;
3130 struct adapter *adap;
3131
3132 if (type >= CXGB4_ULD_MAX)
3133 return -EINVAL;
3134 mutex_lock(&uld_mutex);
3135 if (ulds[type].add) {
3136 ret = -EBUSY;
3137 goto out;
3138 }
3139 ulds[type] = *p;
3140 list_for_each_entry(adap, &adapter_list, list_node)
3141 uld_attach(adap, type);
3142out: mutex_unlock(&uld_mutex);
3143 return ret;
3144}
3145EXPORT_SYMBOL(cxgb4_register_uld);
3146
3147/**
3148 * cxgb4_unregister_uld - unregister an upper-layer driver
3149 * @type: the ULD type
3150 *
3151 * Unregisters an existing upper-layer driver.
3152 */
3153int cxgb4_unregister_uld(enum cxgb4_uld type)
3154{
3155 struct adapter *adap;
3156
3157 if (type >= CXGB4_ULD_MAX)
3158 return -EINVAL;
3159 mutex_lock(&uld_mutex);
3160 list_for_each_entry(adap, &adapter_list, list_node)
3161 adap->uld_handle[type] = NULL;
3162 ulds[type].add = NULL;
3163 mutex_unlock(&uld_mutex);
3164 return 0;
3165}
3166EXPORT_SYMBOL(cxgb4_unregister_uld);
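/*
 * Illustrative usage, not part of the driver: an upper-layer driver registers
 * once at module load and unregisters at unload, e.g.
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *
 * where my_uld_info (an assumption for the example) is the ULD's
 * cxgb4_uld_info; its add() callback is invoked here for every adapter that
 * has already been probed and for any adapter probed later.
 */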
3167
3168/**
3169 * cxgb_up - enable the adapter
3170 * @adap: adapter being enabled
3171 *
3172 * Called when the first port is enabled, this function performs the
3173 * actions necessary to make an adapter operational, such as completing
3174 * the initialization of HW modules, and enabling interrupts.
3175 *
3176 * Must be called with the rtnl lock held.
3177 */
3178static int cxgb_up(struct adapter *adap)
3179{
aaefae9b 3180 int err;
b8ff05a9 3181
aaefae9b
DM
3182 err = setup_sge_queues(adap);
3183 if (err)
3184 goto out;
3185 err = setup_rss(adap);
3186 if (err)
3187 goto freeq;
b8ff05a9
DM
3188
3189 if (adap->flags & USING_MSIX) {
aaefae9b 3190 name_msix_vecs(adap);
b8ff05a9
DM
3191 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3192 adap->msix_info[0].desc, adap);
3193 if (err)
3194 goto irq_err;
3195
3196 err = request_msix_queue_irqs(adap);
3197 if (err) {
3198 free_irq(adap->msix_info[0].vec, adap);
3199 goto irq_err;
3200 }
3201 } else {
3202 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3203 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
b1a3c2b6 3204 adap->port[0]->name, adap);
b8ff05a9
DM
3205 if (err)
3206 goto irq_err;
3207 }
3208 enable_rx(adap);
3209 t4_sge_start(adap);
3210 t4_intr_enable(adap);
aaefae9b 3211 adap->flags |= FULL_INIT_DONE;
b8ff05a9
DM
3212 notify_ulds(adap, CXGB4_STATE_UP);
3213 out:
3214 return err;
3215 irq_err:
3216 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
aaefae9b
DM
3217 freeq:
3218 t4_free_sge_resources(adap);
b8ff05a9
DM
3219 goto out;
3220}
3221
3222static void cxgb_down(struct adapter *adapter)
3223{
3224 t4_intr_disable(adapter);
3225 cancel_work_sync(&adapter->tid_release_task);
881806bc
VP
3226 cancel_work_sync(&adapter->db_full_task);
3227 cancel_work_sync(&adapter->db_drop_task);
b8ff05a9 3228 adapter->tid_release_task_busy = false;
204dc3c0 3229 adapter->tid_release_head = NULL;
b8ff05a9
DM
3230
3231 if (adapter->flags & USING_MSIX) {
3232 free_msix_queue_irqs(adapter);
3233 free_irq(adapter->msix_info[0].vec, adapter);
3234 } else
3235 free_irq(adapter->pdev->irq, adapter);
3236 quiesce_rx(adapter);
aaefae9b
DM
3237 t4_sge_stop(adapter);
3238 t4_free_sge_resources(adapter);
3239 adapter->flags &= ~FULL_INIT_DONE;
b8ff05a9
DM
3240}
3241
3242/*
3243 * net_device operations
3244 */
3245static int cxgb_open(struct net_device *dev)
3246{
3247 int err;
3248 struct port_info *pi = netdev_priv(dev);
3249 struct adapter *adapter = pi->adapter;
3250
6a3c869a
DM
3251 netif_carrier_off(dev);
3252
aaefae9b
DM
3253 if (!(adapter->flags & FULL_INIT_DONE)) {
3254 err = cxgb_up(adapter);
3255 if (err < 0)
3256 return err;
3257 }
b8ff05a9 3258
f68707b8
DM
3259 err = link_start(dev);
3260 if (!err)
3261 netif_tx_start_all_queues(dev);
3262 return err;
b8ff05a9
DM
3263}
3264
3265static int cxgb_close(struct net_device *dev)
3266{
b8ff05a9
DM
3267 struct port_info *pi = netdev_priv(dev);
3268 struct adapter *adapter = pi->adapter;
3269
3270 netif_tx_stop_all_queues(dev);
3271 netif_carrier_off(dev);
060e0c75 3272 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
b8ff05a9
DM
3273}
3274
f2b7e78d
VP
3275/* Return an error number if the indicated filter isn't writable ...
3276 */
3277static int writable_filter(struct filter_entry *f)
3278{
3279 if (f->locked)
3280 return -EPERM;
3281 if (f->pending)
3282 return -EBUSY;
3283
3284 return 0;
3285}
3286
3287/* Delete the filter at the specified index (if valid). This checks for all
 3288 * the common problems with doing this, such as the filter being locked or
 3289 * currently pending in another operation.
3290 */
3291static int delete_filter(struct adapter *adapter, unsigned int fidx)
3292{
3293 struct filter_entry *f;
3294 int ret;
3295
dca4faeb 3296 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
f2b7e78d
VP
3297 return -EINVAL;
3298
3299 f = &adapter->tids.ftid_tab[fidx];
3300 ret = writable_filter(f);
3301 if (ret)
3302 return ret;
3303 if (f->valid)
3304 return del_filter_wr(adapter, fidx);
3305
3306 return 0;
3307}
3308
dca4faeb
VP
3309int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
3310 __be32 sip, __be16 sport, unsigned int queue)
3311{
3312 int ret;
3313 struct filter_entry *f;
3314 struct adapter *adap;
3315 int i;
3316 u8 *val;
3317
3318 adap = netdev2adap(dev);
3319
3320 /* Check to make sure the filter requested is writable ...
3321 */
3322 f = &adap->tids.ftid_tab[stid];
3323 ret = writable_filter(f);
3324 if (ret)
3325 return ret;
3326
3327 /* Clear out any old resources being used by the filter before
3328 * we start constructing the new filter.
3329 */
3330 if (f->valid)
3331 clear_filter(adap, f);
3332
3333 /* Clear out filter specifications */
3334 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3335 f->fs.val.lport = cpu_to_be16(sport);
3336 f->fs.mask.lport = ~0;
3337 val = (u8 *)&sip;
3338 if ((val[0] | val[1] | val[2] | val[3]) != 0)
3339 for (i = 0; i < 4; i++) {
3340 f->fs.val.lip[i] = val[i];
3341 f->fs.mask.lip[i] = ~0;
3342 }
3343
3344 f->fs.dirsteer = 1;
3345 f->fs.iq = queue;
3346 /* Mark filter as locked */
3347 f->locked = 1;
3348 f->fs.rpttid = 1;
3349
3350 ret = set_filter_wr(adap, stid);
3351 if (ret) {
3352 clear_filter(adap, f);
3353 return ret;
3354 }
3355
3356 return 0;
3357}
3358EXPORT_SYMBOL(cxgb4_create_server_filter);
3359
3360int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
3361 unsigned int queue, bool ipv6)
3362{
3363 int ret;
3364 struct filter_entry *f;
3365 struct adapter *adap;
3366
3367 adap = netdev2adap(dev);
3368 f = &adap->tids.ftid_tab[stid];
3369 /* Unlock the filter */
3370 f->locked = 0;
3371
3372 ret = delete_filter(adap, stid);
3373 if (ret)
3374 return ret;
3375
3376 return 0;
3377}
3378EXPORT_SYMBOL(cxgb4_remove_server_filter);
3379
f5152c90
DM
3380static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
3381 struct rtnl_link_stats64 *ns)
b8ff05a9
DM
3382{
3383 struct port_stats stats;
3384 struct port_info *p = netdev_priv(dev);
3385 struct adapter *adapter = p->adapter;
b8ff05a9
DM
3386
3387 spin_lock(&adapter->stats_lock);
3388 t4_get_port_stats(adapter, p->tx_chan, &stats);
3389 spin_unlock(&adapter->stats_lock);
3390
3391 ns->tx_bytes = stats.tx_octets;
3392 ns->tx_packets = stats.tx_frames;
3393 ns->rx_bytes = stats.rx_octets;
3394 ns->rx_packets = stats.rx_frames;
3395 ns->multicast = stats.rx_mcast_frames;
3396
3397 /* detailed rx_errors */
3398 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3399 stats.rx_runt;
3400 ns->rx_over_errors = 0;
3401 ns->rx_crc_errors = stats.rx_fcs_err;
3402 ns->rx_frame_errors = stats.rx_symbol_err;
3403 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
3404 stats.rx_ovflow2 + stats.rx_ovflow3 +
3405 stats.rx_trunc0 + stats.rx_trunc1 +
3406 stats.rx_trunc2 + stats.rx_trunc3;
3407 ns->rx_missed_errors = 0;
3408
3409 /* detailed tx_errors */
3410 ns->tx_aborted_errors = 0;
3411 ns->tx_carrier_errors = 0;
3412 ns->tx_fifo_errors = 0;
3413 ns->tx_heartbeat_errors = 0;
3414 ns->tx_window_errors = 0;
3415
3416 ns->tx_errors = stats.tx_error_frames;
3417 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3418 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3419 return ns;
3420}
3421
3422static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3423{
060e0c75 3424 unsigned int mbox;
b8ff05a9
DM
3425 int ret = 0, prtad, devad;
3426 struct port_info *pi = netdev_priv(dev);
3427 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3428
3429 switch (cmd) {
3430 case SIOCGMIIPHY:
3431 if (pi->mdio_addr < 0)
3432 return -EOPNOTSUPP;
3433 data->phy_id = pi->mdio_addr;
3434 break;
3435 case SIOCGMIIREG:
3436 case SIOCSMIIREG:
3437 if (mdio_phy_id_is_c45(data->phy_id)) {
3438 prtad = mdio_phy_id_prtad(data->phy_id);
3439 devad = mdio_phy_id_devad(data->phy_id);
3440 } else if (data->phy_id < 32) {
3441 prtad = data->phy_id;
3442 devad = 0;
3443 data->reg_num &= 0x1f;
3444 } else
3445 return -EINVAL;
3446
060e0c75 3447 mbox = pi->adapter->fn;
b8ff05a9 3448 if (cmd == SIOCGMIIREG)
060e0c75 3449 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
3450 data->reg_num, &data->val_out);
3451 else
060e0c75 3452 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
b8ff05a9
DM
3453 data->reg_num, data->val_in);
3454 break;
3455 default:
3456 return -EOPNOTSUPP;
3457 }
3458 return ret;
3459}
3460
3461static void cxgb_set_rxmode(struct net_device *dev)
3462{
3463 /* unfortunately we can't return errors to the stack */
3464 set_rxmode(dev, -1, false);
3465}
3466
3467static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3468{
3469 int ret;
3470 struct port_info *pi = netdev_priv(dev);
3471
3472 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
3473 return -EINVAL;
060e0c75
DM
3474 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
3475 -1, -1, -1, true);
b8ff05a9
DM
3476 if (!ret)
3477 dev->mtu = new_mtu;
3478 return ret;
3479}
3480
3481static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3482{
3483 int ret;
3484 struct sockaddr *addr = p;
3485 struct port_info *pi = netdev_priv(dev);
3486
3487 if (!is_valid_ether_addr(addr->sa_data))
504f9b5a 3488 return -EADDRNOTAVAIL;
b8ff05a9 3489
060e0c75
DM
3490 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
3491 pi->xact_addr_filt, addr->sa_data, true, true);
b8ff05a9
DM
3492 if (ret < 0)
3493 return ret;
3494
3495 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3496 pi->xact_addr_filt = ret;
3497 return 0;
3498}
3499
b8ff05a9
DM
3500#ifdef CONFIG_NET_POLL_CONTROLLER
3501static void cxgb_netpoll(struct net_device *dev)
3502{
3503 struct port_info *pi = netdev_priv(dev);
3504 struct adapter *adap = pi->adapter;
3505
3506 if (adap->flags & USING_MSIX) {
3507 int i;
3508 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3509
3510 for (i = pi->nqsets; i; i--, rx++)
3511 t4_sge_intr_msix(0, &rx->rspq);
3512 } else
3513 t4_intr_handler(adap)(0, adap);
3514}
3515#endif
3516
3517static const struct net_device_ops cxgb4_netdev_ops = {
3518 .ndo_open = cxgb_open,
3519 .ndo_stop = cxgb_close,
3520 .ndo_start_xmit = t4_eth_xmit,
9be793bf 3521 .ndo_get_stats64 = cxgb_get_stats,
b8ff05a9
DM
3522 .ndo_set_rx_mode = cxgb_set_rxmode,
3523 .ndo_set_mac_address = cxgb_set_mac_addr,
2ed28baa 3524 .ndo_set_features = cxgb_set_features,
b8ff05a9
DM
3525 .ndo_validate_addr = eth_validate_addr,
3526 .ndo_do_ioctl = cxgb_ioctl,
3527 .ndo_change_mtu = cxgb_change_mtu,
b8ff05a9
DM
3528#ifdef CONFIG_NET_POLL_CONTROLLER
3529 .ndo_poll_controller = cxgb_netpoll,
3530#endif
3531};
3532
3533void t4_fatal_err(struct adapter *adap)
3534{
3535 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
3536 t4_intr_disable(adap);
3537 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3538}
3539
3540static void setup_memwin(struct adapter *adap)
3541{
3542 u32 bar0;
3543
3544 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
3545 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
3546 (bar0 + MEMWIN0_BASE) | BIR(0) |
3547 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
3548 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
3549 (bar0 + MEMWIN1_BASE) | BIR(0) |
3550 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
3551 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
3552 (bar0 + MEMWIN2_BASE) | BIR(0) |
3553 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
636f9d37
VP
3554}
3555
3556static void setup_memwin_rdma(struct adapter *adap)
3557{
1ae970e0
DM
3558 if (adap->vres.ocq.size) {
3559 unsigned int start, sz_kb;
3560
3561 start = pci_resource_start(adap->pdev, 2) +
3562 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3563 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3564 t4_write_reg(adap,
3565 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
3566 start | BIR(1) | WINDOW(ilog2(sz_kb)));
3567 t4_write_reg(adap,
3568 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
3569 adap->vres.ocq.start);
3570 t4_read_reg(adap,
3571 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
3572 }
b8ff05a9
DM
3573}
3574
02b5fb8e
DM
3575static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3576{
3577 u32 v;
3578 int ret;
3579
3580 /* get device capabilities */
3581 memset(c, 0, sizeof(*c));
3582 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3583 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 3584 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
060e0c75 3585 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
02b5fb8e
DM
3586 if (ret < 0)
3587 return ret;
3588
3589 /* select capabilities we'll be using */
3590 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3591 if (!vf_acls)
3592 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3593 else
3594 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3595 } else if (vf_acls) {
3596 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
3597 return ret;
3598 }
3599 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3600 FW_CMD_REQUEST | FW_CMD_WRITE);
060e0c75 3601 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
02b5fb8e
DM
3602 if (ret < 0)
3603 return ret;
3604
060e0c75 3605 ret = t4_config_glbl_rss(adap, adap->fn,
02b5fb8e
DM
3606 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3607 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3608 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
3609 if (ret < 0)
3610 return ret;
3611
060e0c75
DM
3612 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
3613 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
02b5fb8e
DM
3614 if (ret < 0)
3615 return ret;
3616
3617 t4_sge_init(adap);
3618
02b5fb8e
DM
3619 /* tweak some settings */
3620 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
3621 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
3622 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
3623 v = t4_read_reg(adap, TP_PIO_DATA);
3624 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
060e0c75 3625
dca4faeb
VP
3626 /* first 4 Tx modulation queues point to consecutive Tx channels */
3627 adap->params.tp.tx_modq_map = 0xE4;
3628 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3629 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
3630
3631 /* associate each Tx modulation queue with consecutive Tx channels */
3632 v = 0x84218421;
3633 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3634 &v, 1, A_TP_TX_SCHED_HDR);
3635 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3636 &v, 1, A_TP_TX_SCHED_FIFO);
3637 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
3638 &v, 1, A_TP_TX_SCHED_PCMD);
3639
3640#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
3641 if (is_offload(adap)) {
3642 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
3643 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3644 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3645 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3646 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3647 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
3648 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3649 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3650 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
3651 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
3652 }
3653
060e0c75
DM
3654 /* get basic stuff going */
3655 return t4_early_init(adap, adap->fn);
02b5fb8e
DM
3656}
3657
b8ff05a9
DM
3658/*
3659 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3660 */
3661#define MAX_ATIDS 8192U
3662
636f9d37
VP
3663/*
3664 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3665 *
3666 * If the firmware we're dealing with has Configuration File support, then
3667 * we use that to perform all configuration
3668 */
3669
3670/*
3671 * Tweak configuration based on module parameters, etc. Most of these have
3672 * defaults assigned to them by Firmware Configuration Files (if we're using
3673 * them) but need to be explicitly set if we're using hard-coded
3674 * initialization. But even in the case of using Firmware Configuration
3675 * Files, we'd like to expose the ability to change these via module
3676 * parameters so these are essentially common tweaks/settings for
3677 * Configuration Files and hard-coded initialization ...
3678 */
3679static int adap_init0_tweaks(struct adapter *adapter)
3680{
3681 /*
3682 * Fix up various Host-Dependent Parameters like Page Size, Cache
3683 * Line Size, etc. The firmware default is for a 4KB Page Size and
3684 * 64B Cache Line Size ...
3685 */
3686 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3687
3688 /*
3689 * Process module parameters which affect early initialization.
3690 */
3691 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3692 dev_err(&adapter->pdev->dev,
3693 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3694 rx_dma_offset);
3695 rx_dma_offset = 2;
3696 }
3697 t4_set_reg_field(adapter, SGE_CONTROL,
3698 PKTSHIFT_MASK,
3699 PKTSHIFT(rx_dma_offset));
3700
3701 /*
3702 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3703 * adds the pseudo header itself.
3704 */
3705 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
3706 CSUM_HAS_PSEUDO_HDR, 0);
3707
3708 return 0;
3709}
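
adap_init0_tweaks() leans on field-update helpers such as t4_set_reg_field()
and t4_tp_wr_bits_indirect(). A minimal sketch of the read-modify-write
pattern such a helper is assumed to follow (illustrative only, not the
driver's implementation):

        static void set_reg_field_sketch(struct adapter *adap, u32 addr,
                                         u32 mask, u32 val)
        {
                u32 v = t4_read_reg(adap, addr);

                /* clear the field selected by 'mask', then set the new value */
                t4_write_reg(adap, addr, (v & ~mask) | (val & mask));
        }
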
3710
3711/*
3712 * Attempt to initialize the adapter via a Firmware Configuration File.
3713 */
3714static int adap_init0_config(struct adapter *adapter, int reset)
3715{
3716 struct fw_caps_config_cmd caps_cmd;
3717 const struct firmware *cf;
3718 unsigned long mtype = 0, maddr = 0;
3719 u32 finiver, finicsum, cfcsum;
3720 int ret, using_flash;
3721
3722 /*
3723 * Reset device if necessary.
3724 */
3725 if (reset) {
3726 ret = t4_fw_reset(adapter, adapter->mbox,
3727 PIORSTMODE | PIORST);
3728 if (ret < 0)
3729 goto bye;
3730 }
3731
3732 /*
3733 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3734 * then use that. Otherwise, use the configuration file stored
3735 * in the adapter flash ...
3736 */
3737 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
3738 if (ret < 0) {
3739 using_flash = 1;
3740 mtype = FW_MEMTYPE_CF_FLASH;
3741 maddr = t4_flash_cfg_addr(adapter);
3742 } else {
3743 u32 params[7], val[7];
3744
3745 using_flash = 0;
3746 if (cf->size >= FLASH_CFG_MAX_SIZE)
3747 ret = -ENOMEM;
3748 else {
3749 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3750 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3751 ret = t4_query_params(adapter, adapter->mbox,
3752 adapter->fn, 0, 1, params, val);
3753 if (ret == 0) {
3754 /*
3755 * For t4_memory_write() below addresses and
3756 * sizes have to be in terms of multiples of 4
3757 * bytes. So, if the Configuration File isn't
3758 * a multiple of 4 bytes in length we'll have
3759 * to write that out separately since we can't
3760 * guarantee that the bytes following the
3761 * residual byte in the buffer returned by
3762 * request_firmware() are zeroed out ...
3763 */
3764 size_t resid = cf->size & 0x3;
3765 size_t size = cf->size & ~0x3;
3766 __be32 *data = (__be32 *)cf->data;
3767
3768 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
3769 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
3770
3771 ret = t4_memory_write(adapter, mtype, maddr,
3772 size, data);
3773 if (ret == 0 && resid != 0) {
3774 union {
3775 __be32 word;
3776 char buf[4];
3777 } last;
3778 int i;
3779
3780 last.word = data[size >> 2];
3781 for (i = resid; i < 4; i++)
3782 last.buf[i] = 0;
3783 ret = t4_memory_write(adapter, mtype,
3784 maddr + size,
3785 4, &last.word);
3786 }
3787 }
3788 }
3789
3790 release_firmware(cf);
3791 if (ret)
3792 goto bye;
3793 }
3794
3795 /*
3796 * Issue a Capability Configuration command to the firmware to get it
3797 * to parse the Configuration File. We don't use t4_fw_config_file()
3798 * because we want the ability to modify various features after we've
3799 * processed the configuration file ...
3800 */
3801 memset(&caps_cmd, 0, sizeof(caps_cmd));
3802 caps_cmd.op_to_write =
3803 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3804 FW_CMD_REQUEST |
3805 FW_CMD_READ);
ce91a923 3806 caps_cmd.cfvalid_to_len16 =
636f9d37
VP
3807 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
3808 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3809 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
3810 FW_LEN16(caps_cmd));
3811 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3812 &caps_cmd);
3813 if (ret < 0)
3814 goto bye;
3815
3816 finiver = ntohl(caps_cmd.finiver);
3817 finicsum = ntohl(caps_cmd.finicsum);
3818 cfcsum = ntohl(caps_cmd.cfcsum);
3819 if (finicsum != cfcsum)
3820 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3821 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3822 finicsum, cfcsum);
3823
636f9d37
VP
3824 /*
3825 * And now tell the firmware to use the configuration we just loaded.
3826 */
3827 caps_cmd.op_to_write =
3828 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3829 FW_CMD_REQUEST |
3830 FW_CMD_WRITE);
ce91a923 3831 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
3832 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3833 NULL);
3834 if (ret < 0)
3835 goto bye;
3836
3837 /*
3838 * Tweak configuration based on system architecture, module
3839 * parameters, etc.
3840 */
3841 ret = adap_init0_tweaks(adapter);
3842 if (ret < 0)
3843 goto bye;
3844
3845 /*
3846 * And finally tell the firmware to initialize itself using the
3847 * parameters from the Configuration File.
3848 */
3849 ret = t4_fw_initialize(adapter, adapter->mbox);
3850 if (ret < 0)
3851 goto bye;
3852
3853 /*
3854 * Return successfully and note that we're operating with parameters
3855 * from the Configuration File rather than from hard-wired
3856 * initialization constants buried in the driver.
3857 */
3858 adapter->flags |= USING_SOFT_PARAMS;
3859 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3860 "Configuration File %s, version %#x, computed checksum %#x\n",
3861 (using_flash
3862 ? "in device FLASH"
3863 : "/lib/firmware/" FW_CFNAME),
3864 finiver, cfcsum);
3865 return 0;
3866
3867 /*
3868 * Something bad happened. Return the error ... (If the "error"
3869 * is that there's no Configuration File on the adapter we don't
3870 * want to issue a warning since this is fairly common.)
3871 */
3872bye:
3873 if (ret != -ENOENT)
3874 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
3875 -ret);
3876 return ret;
3877}
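
The residual-byte handling in adap_init0_config() exists because
t4_memory_write() transfers whole 32-bit words. The same padding technique,
pulled out into a standalone sketch (the helper name is hypothetical and the
t4_memory_write() signature is inferred from the call above):

        static int write_cf_padded_sketch(struct adapter *adap,
                                          unsigned long mtype,
                                          unsigned long maddr,
                                          const struct firmware *cf)
        {
                size_t size = cf->size & ~0x3;  /* whole words */
                size_t resid = cf->size & 0x3;  /* leftover bytes */
                int ret;

                ret = t4_memory_write(adap, mtype, maddr, size,
                                      (__be32 *)cf->data);
                if (ret == 0 && resid != 0) {
                        union {
                                __be32 word;
                                char buf[4];
                        } last = { .word = 0 };

                        /* copy the tail; the rest of the word stays zeroed */
                        memcpy(last.buf, cf->data + size, resid);
                        ret = t4_memory_write(adap, mtype, maddr + size, 4,
                                              &last.word);
                }
                return ret;
        }
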
3878
13ee15d3
VP
3879/*
3880 * Attempt to initialize the adapter via hard-coded, driver supplied
3881 * parameters ...
3882 */
3883static int adap_init0_no_config(struct adapter *adapter, int reset)
3884{
3885 struct sge *s = &adapter->sge;
3886 struct fw_caps_config_cmd caps_cmd;
3887 u32 v;
3888 int i, ret;
3889
3890 /*
3891 * Reset device if necessary
3892 */
3893 if (reset) {
3894 ret = t4_fw_reset(adapter, adapter->mbox,
3895 PIORSTMODE | PIORST);
3896 if (ret < 0)
3897 goto bye;
3898 }
3899
3900 /*
3901 * Get device capabilities and select which we'll be using.
3902 */
3903 memset(&caps_cmd, 0, sizeof(caps_cmd));
3904 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3905 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 3906 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
13ee15d3
VP
3907 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3908 &caps_cmd);
3909 if (ret < 0)
3910 goto bye;
3911
13ee15d3
VP
3912 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3913 if (!vf_acls)
3914 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3915 else
3916 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3917 } else if (vf_acls) {
3918 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
3919 goto bye;
3920 }
3921 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3922 FW_CMD_REQUEST | FW_CMD_WRITE);
3923 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3924 NULL);
3925 if (ret < 0)
3926 goto bye;
3927
3928 /*
3929 * Tweak configuration based on system architecture, module
3930 * parameters, etc.
3931 */
3932 ret = adap_init0_tweaks(adapter);
3933 if (ret < 0)
3934 goto bye;
3935
3936 /*
3937 * Select RSS Global Mode we want to use. We use "Basic Virtual"
3938 * mode which maps each Virtual Interface to its own section of
3939 * the RSS Table and we turn on all map and hash enables ...
3940 */
3941 adapter->flags |= RSS_TNLALLLOOKUP;
3942 ret = t4_config_glbl_rss(adapter, adapter->mbox,
3943 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3944 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3945 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
3946 ((adapter->flags & RSS_TNLALLLOOKUP) ?
3947 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
3948 if (ret < 0)
3949 goto bye;
3950
3951 /*
3952 * Set up our own fundamental resource provisioning ...
3953 */
3954 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
3955 PFRES_NEQ, PFRES_NETHCTRL,
3956 PFRES_NIQFLINT, PFRES_NIQ,
3957 PFRES_TC, PFRES_NVI,
3958 FW_PFVF_CMD_CMASK_MASK,
3959 pfvfres_pmask(adapter, adapter->fn, 0),
3960 PFRES_NEXACTF,
3961 PFRES_R_CAPS, PFRES_WX_CAPS);
3962 if (ret < 0)
3963 goto bye;
3964
3965 /*
3966 * Perform low level SGE initialization. We need to do this before we
3967 * send the firmware the INITIALIZE command because that will cause
3968 * any other PF Drivers which are waiting for the Master
3969 * Initialization to proceed forward.
3970 */
3971 for (i = 0; i < SGE_NTIMERS - 1; i++)
3972 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
3973 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3974 s->counter_val[0] = 1;
3975 for (i = 1; i < SGE_NCOUNTERS; i++)
3976 s->counter_val[i] = min(intr_cnt[i - 1],
3977 THRESHOLD_0_GET(THRESHOLD_0_MASK));
3978 t4_sge_init(adapter);
3979
3980#ifdef CONFIG_PCI_IOV
3981 /*
3982 * Provision resource limits for Virtual Functions. We currently
3983 * grant them all the same static resource limits except for the Port
3984 * Access Rights Mask which we're assigning based on the PF. All of
3985 * the static provisioning stuff for both the PF and VF really needs
3986 * to be managed in a persistent manner for each device which the
3987 * firmware controls.
3988 */
3989 {
3990 int pf, vf;
3991
3992 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3993 if (num_vf[pf] <= 0)
3994 continue;
3995
3996 /* VF numbering starts at 1! */
3997 for (vf = 1; vf <= num_vf[pf]; vf++) {
3998 ret = t4_cfg_pfvf(adapter, adapter->mbox,
3999 pf, vf,
4000 VFRES_NEQ, VFRES_NETHCTRL,
4001 VFRES_NIQFLINT, VFRES_NIQ,
4002 VFRES_TC, VFRES_NVI,
4003 FW_PFVF_CMD_CMASK_GET(
4004 FW_PFVF_CMD_CMASK_MASK),
4005 pfvfres_pmask(
4006 adapter, pf, vf),
4007 VFRES_NEXACTF,
4008 VFRES_R_CAPS, VFRES_WX_CAPS);
4009 if (ret < 0)
4010 dev_warn(adapter->pdev_dev,
4011 "failed to "\
4012 "provision pf/vf=%d/%d; "
4013 "err=%d\n", pf, vf, ret);
4014 }
4015 }
4016 }
4017#endif
4018
4019 /*
4020 * Set up the default filter mode. Later we'll want to implement this
4021 * via a firmware command, etc. ... This needs to be done before the
4022 * firmware initialization command ... If the selected set of fields
4023 * isn't equal to the default value, we'll need to make sure that the
4024 * field selections will fit in the 36-bit budget.
4025 */
4026 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
404d9e3f 4027 int j, bits = 0;
13ee15d3 4028
404d9e3f
VP
4029 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4030 switch (tp_vlan_pri_map & (1 << j)) {
13ee15d3
VP
4031 case 0:
4032 /* compressed filter field not enabled */
4033 break;
4034 case FCOE_MASK:
4035 bits += 1;
4036 break;
4037 case PORT_MASK:
4038 bits += 3;
4039 break;
4040 case VNIC_ID_MASK:
4041 bits += 17;
4042 break;
4043 case VLAN_MASK:
4044 bits += 17;
4045 break;
4046 case TOS_MASK:
4047 bits += 8;
4048 break;
4049 case PROTOCOL_MASK:
4050 bits += 8;
4051 break;
4052 case ETHERTYPE_MASK:
4053 bits += 16;
4054 break;
4055 case MACMATCH_MASK:
4056 bits += 9;
4057 break;
4058 case MPSHITTYPE_MASK:
4059 bits += 3;
4060 break;
4061 case FRAGMENTATION_MASK:
4062 bits += 1;
4063 break;
4064 }
4065
4066 if (bits > 36) {
4067 dev_err(adapter->pdev_dev,
4068 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4069 " using %#x\n", tp_vlan_pri_map, bits,
4070 TP_VLAN_PRI_MAP_DEFAULT);
4071 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4072 }
4073 }
4074 v = tp_vlan_pri_map;
4075 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4076 &v, 1, TP_VLAN_PRI_MAP);
4077
4078 /*
4079 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4080 * to support any of the compressed filter fields above. Newer
4081 * versions of the firmware do this automatically but it doesn't hurt
4082 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
4083 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4084 * since the firmware automatically turns this on and off when we have
4085 * a non-zero number of filters active (since it does have a
4086 * performance impact).
4087 */
4088 if (tp_vlan_pri_map)
4089 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4090 FIVETUPLELOOKUP_MASK,
4091 FIVETUPLELOOKUP_MASK);
4092
4093 /*
4094 * Tweak some settings.
4095 */
4096 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4097 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4098 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4099 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4100
4101 /*
4102 * Get basic stuff going by issuing the Firmware Initialize command.
4103 * Note that this _must_ be after all PFVF commands ...
4104 */
4105 ret = t4_fw_initialize(adapter, adapter->mbox);
4106 if (ret < 0)
4107 goto bye;
4108
4109 /*
4110 * Return successfully!
4111 */
4112 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
4113 "driver parameters\n");
4114 return 0;
4115
4116 /*
4117 * Something bad happened. Return the error ...
4118 */
4119bye:
4120 return ret;
4121}
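
The switch statement above totals the width of each enabled compressed
filter field against the hardware's 36-bit budget. The same computation,
restated as a table of per-field widths (a sketch only; the *_MASK names
and widths are taken from the switch above, nothing else is assumed):

        static int filter_mode_width_sketch(unsigned int mode)
        {
                static const struct {
                        unsigned int mask;
                        int width;
                } fields[] = {
                        { FCOE_MASK, 1 },        { PORT_MASK, 3 },
                        { VNIC_ID_MASK, 17 },    { VLAN_MASK, 17 },
                        { TOS_MASK, 8 },         { PROTOCOL_MASK, 8 },
                        { ETHERTYPE_MASK, 16 },  { MACMATCH_MASK, 9 },
                        { MPSHITTYPE_MASK, 3 },  { FRAGMENTATION_MASK, 1 },
                };
                int i, bits = 0;

                for (i = 0; i < ARRAY_SIZE(fields); i++)
                        if (mode & fields[i].mask)
                                bits += fields[i].width;
                return bits;    /* reject the mode if this exceeds 36 */
        }
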
4122
b8ff05a9
DM
4123/*
4124 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4125 */
4126static int adap_init0(struct adapter *adap)
4127{
4128 int ret;
4129 u32 v, port_vec;
4130 enum dev_state state;
4131 u32 params[7], val[7];
9a4da2cd 4132 struct fw_caps_config_cmd caps_cmd;
636f9d37 4133 int reset = 1, j;
b8ff05a9 4134
636f9d37
VP
4135 /*
4136 * Contact FW, advertising Master capability (and potentially forcing
4137 * ourselves as the Master PF if our module parameter force_init is
4138 * set).
4139 */
4140 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4141 force_init ? MASTER_MUST : MASTER_MAY,
4142 &state);
b8ff05a9
DM
4143 if (ret < 0) {
4144 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4145 ret);
4146 return ret;
4147 }
636f9d37
VP
4148 if (ret == adap->mbox)
4149 adap->flags |= MASTER_PF;
4150 if (force_init && state == DEV_STATE_INIT)
4151 state = DEV_STATE_UNINIT;
b8ff05a9 4152
636f9d37
VP
4153 /*
4154 * If we're the Master PF Driver and the device is uninitialized,
4155 * then let's consider upgrading the firmware ... (We always want
4156 * to check the firmware version number in order to A. get it for
4157 * later reporting and B. to warn if the currently loaded firmware
4158 * is excessively mismatched relative to the driver.)
4159 */
4160 ret = t4_check_fw_version(adap);
4161 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4162 if (ret == -EINVAL || ret > 0) {
4163 if (upgrade_fw(adap) >= 0) {
4164 /*
4165 * Note that the chip was reset as part of the
4166 * firmware upgrade so we don't reset it again
4167 * below and grab the new firmware version.
4168 */
4169 reset = 0;
4170 ret = t4_check_fw_version(adap);
4171 }
4172 }
4173 if (ret < 0)
4174 return ret;
4175 }
b8ff05a9 4176
636f9d37
VP
4177 /*
4178 * Grab VPD parameters. This should be done after we establish a
4179 * connection to the firmware since some of the VPD parameters
4180 * (notably the Core Clock frequency) are retrieved via requests to
4181 * the firmware. On the other hand, we need these fairly early on
4182 * so we do this right after getting ahold of the firmware.
4183 */
4184 ret = get_vpd_params(adap, &adap->params.vpd);
a0881cab
DM
4185 if (ret < 0)
4186 goto bye;
a0881cab 4187
636f9d37 4188 /*
13ee15d3
VP
4189 * Find out what ports are available to us. Note that we need to do
4190 * this before calling adap_init0_no_config() since it needs nports
4191 * and portvec ...
636f9d37
VP
4192 */
4193 v =
4194 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4195 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4196 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
a0881cab
DM
4197 if (ret < 0)
4198 goto bye;
4199
636f9d37
VP
4200 adap->params.nports = hweight32(port_vec);
4201 adap->params.portvec = port_vec;
4202
4203 /*
4204 * If the firmware is initialized already (and we're not forcing a
4205 * master initialization), note that we're living with existing
4206 * adapter parameters. Otherwise, it's time to try initializing the
4207 * adapter ...
4208 */
4209 if (state == DEV_STATE_INIT) {
4210 dev_info(adap->pdev_dev, "Coming up as %s: "\
4211 "Adapter already initialized\n",
4212 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4213 adap->flags |= USING_SOFT_PARAMS;
4214 } else {
4215 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4216 "Initializing adapter\n");
636f9d37
VP
4217
4218 /*
4219 * If the firmware doesn't support Configuration
4220 * Files, warn the user.
4221 */
4222 if (ret < 0)
13ee15d3 4223 dev_warn(adap->pdev_dev, "Firmware doesn't support "
636f9d37 4224 "configuration file.\n");
13ee15d3
VP
4225 if (force_old_init)
4226 ret = adap_init0_no_config(adap, reset);
636f9d37
VP
4227 else {
4228 /*
13ee15d3
VP
4229 * Find out whether we're dealing with a version of
4230 * the firmware which has configuration file support.
636f9d37 4231 */
13ee15d3
VP
4232 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4233 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4234 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4235 params, val);
636f9d37 4236
13ee15d3
VP
4237 /*
4238 * If the firmware doesn't support Configuration
4239 * Files, use the old Driver-based, hard-wired
4240 * initialization. Otherwise, try using the
4241 * Configuration File support and fall back to the
4242 * Driver-based initialization if there's no
4243 * Configuration File found.
4244 */
4245 if (ret < 0)
4246 ret = adap_init0_no_config(adap, reset);
4247 else {
4248 /*
4249 * The firmware provides us with a memory
4250 * buffer where we can load a Configuration
4251 * File from the host if we want to override
4252 * the Configuration File in flash.
4253 */
4254
4255 ret = adap_init0_config(adap, reset);
4256 if (ret == -ENOENT) {
4257 dev_info(adap->pdev_dev,
4258 "No Configuration File present "
4259 "on adapter. Using hard-wired "
4260 "configuration parameters.\n");
4261 ret = adap_init0_no_config(adap, reset);
4262 }
636f9d37
VP
4263 }
4264 }
4265 if (ret < 0) {
4266 dev_err(adap->pdev_dev,
4267 "could not initialize adapter, error %d\n",
4268 -ret);
4269 goto bye;
4270 }
4271 }
4272
4273 /*
4274 * If we're living with non-hard-coded parameters (either from a
4275 * Firmware Configuration File or values programmed by a different PF
4276 * Driver), give the SGE code a chance to pull in anything that it
4277 * needs ... Note that this must be called after we retrieve our VPD
4278 * parameters in order to know how to convert core ticks to seconds.
4279 */
4280 if (adap->flags & USING_SOFT_PARAMS) {
4281 ret = t4_sge_init(adap);
4282 if (ret < 0)
4283 goto bye;
4284 }
4285
9a4da2cd
VP
4286 if (is_bypass_device(adap->pdev->device))
4287 adap->params.bypass = 1;
4288
636f9d37
VP
4289 /*
4290 * Grab some of our basic fundamental operating parameters.
4291 */
4292#define FW_PARAM_DEV(param) \
4293 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4294 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4295
b8ff05a9 4296#define FW_PARAM_PFVF(param) \
636f9d37
VP
4297 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4298 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
4299 FW_PARAMS_PARAM_Y(0) | \
4300 FW_PARAMS_PARAM_Z(0)
b8ff05a9 4301
636f9d37 4302 params[0] = FW_PARAM_PFVF(EQ_START);
b8ff05a9
DM
4303 params[1] = FW_PARAM_PFVF(L2T_START);
4304 params[2] = FW_PARAM_PFVF(L2T_END);
4305 params[3] = FW_PARAM_PFVF(FILTER_START);
4306 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 4307 params[5] = FW_PARAM_PFVF(IQFLINT_START);
636f9d37 4308 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
b8ff05a9
DM
4309 if (ret < 0)
4310 goto bye;
636f9d37
VP
4311 adap->sge.egr_start = val[0];
4312 adap->l2t_start = val[1];
4313 adap->l2t_end = val[2];
b8ff05a9
DM
4314 adap->tids.ftid_base = val[3];
4315 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 4316 adap->sge.ingr_start = val[5];
b8ff05a9 4317
636f9d37
VP
4318 /* query params related to active filter region */
4319 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4320 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4321 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
4322 /* If the Active Filter region is non-empty, enable establishing
4323 * offload connections through firmware work requests
4324 */
4325 if ((val[0] != val[1]) && (ret >= 0)) {
4326 adap->flags |= FW_OFLD_CONN;
4327 adap->tids.aftid_base = val[0];
4328 adap->tids.aftid_end = val[1];
4329 }
4330
636f9d37
VP
4331 /*
4332 * Get device capabilities so we can determine what resources we need
4333 * to manage.
4334 */
4335 memset(&caps_cmd, 0, sizeof(caps_cmd));
9a4da2cd 4336 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
13ee15d3 4337 FW_CMD_REQUEST | FW_CMD_READ);
ce91a923 4338 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
4339 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4340 &caps_cmd);
4341 if (ret < 0)
4342 goto bye;
4343
13ee15d3 4344 if (caps_cmd.ofldcaps) {
b8ff05a9
DM
4345 /* query offload-related parameters */
4346 params[0] = FW_PARAM_DEV(NTID);
4347 params[1] = FW_PARAM_PFVF(SERVER_START);
4348 params[2] = FW_PARAM_PFVF(SERVER_END);
4349 params[3] = FW_PARAM_PFVF(TDDP_START);
4350 params[4] = FW_PARAM_PFVF(TDDP_END);
4351 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
636f9d37
VP
4352 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4353 params, val);
b8ff05a9
DM
4354 if (ret < 0)
4355 goto bye;
4356 adap->tids.ntids = val[0];
4357 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4358 adap->tids.stid_base = val[1];
4359 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37
VP
4360 /*
4361 * Set up the server filter region. Divide the available filter
4362 * region into two parts. Regular filters get 1/3rd and server
4363 * filters get 2/3rd part. This is only enabled if the workaround
4364 * path is enabled.
4365 * 1. For regular filters.
4366 * 2. Server filters: these are special filters used to redirect
4367 * SYN packets to the offload queue.
4368 */
4369 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4370 adap->tids.sftid_base = adap->tids.ftid_base +
4371 DIV_ROUND_UP(adap->tids.nftids, 3);
4372 adap->tids.nsftids = adap->tids.nftids -
4373 DIV_ROUND_UP(adap->tids.nftids, 3);
4374 adap->tids.nftids = adap->tids.sftid_base -
4375 adap->tids.ftid_base;
4376 }
b8ff05a9
DM
4377 adap->vres.ddp.start = val[3];
4378 adap->vres.ddp.size = val[4] - val[3] + 1;
4379 adap->params.ofldq_wr_cred = val[5];
636f9d37 4380
b8ff05a9
DM
4381 adap->params.offload = 1;
4382 }
636f9d37 4383 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
4384 params[0] = FW_PARAM_PFVF(STAG_START);
4385 params[1] = FW_PARAM_PFVF(STAG_END);
4386 params[2] = FW_PARAM_PFVF(RQ_START);
4387 params[3] = FW_PARAM_PFVF(RQ_END);
4388 params[4] = FW_PARAM_PFVF(PBL_START);
4389 params[5] = FW_PARAM_PFVF(PBL_END);
636f9d37
VP
4390 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4391 params, val);
b8ff05a9
DM
4392 if (ret < 0)
4393 goto bye;
4394 adap->vres.stag.start = val[0];
4395 adap->vres.stag.size = val[1] - val[0] + 1;
4396 adap->vres.rq.start = val[2];
4397 adap->vres.rq.size = val[3] - val[2] + 1;
4398 adap->vres.pbl.start = val[4];
4399 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
4400
4401 params[0] = FW_PARAM_PFVF(SQRQ_START);
4402 params[1] = FW_PARAM_PFVF(SQRQ_END);
4403 params[2] = FW_PARAM_PFVF(CQ_START);
4404 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
4405 params[4] = FW_PARAM_PFVF(OCQ_START);
4406 params[5] = FW_PARAM_PFVF(OCQ_END);
636f9d37 4407 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
a0881cab
DM
4408 if (ret < 0)
4409 goto bye;
4410 adap->vres.qp.start = val[0];
4411 adap->vres.qp.size = val[1] - val[0] + 1;
4412 adap->vres.cq.start = val[2];
4413 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
4414 adap->vres.ocq.start = val[4];
4415 adap->vres.ocq.size = val[5] - val[4] + 1;
b8ff05a9 4416 }
636f9d37 4417 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
4418 params[0] = FW_PARAM_PFVF(ISCSI_START);
4419 params[1] = FW_PARAM_PFVF(ISCSI_END);
636f9d37
VP
4420 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
4421 params, val);
b8ff05a9
DM
4422 if (ret < 0)
4423 goto bye;
4424 adap->vres.iscsi.start = val[0];
4425 adap->vres.iscsi.size = val[1] - val[0] + 1;
4426 }
4427#undef FW_PARAM_PFVF
4428#undef FW_PARAM_DEV
4429
636f9d37
VP
4430 /*
4431 * These are finalized by FW initialization, load their values now.
4432 */
b8ff05a9
DM
4433 v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
4434 adap->params.tp.tre = TIMERRESOLUTION_GET(v);
636f9d37 4435 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
b8ff05a9
DM
4436 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4437 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4438 adap->params.b_wnd);
7ee9ff94 4439
636f9d37
VP
4440 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4441 for (j = 0; j < NCHAN; j++)
4442 adap->params.tp.tx_modq[j] = j;
7ee9ff94 4443
636f9d37 4444 adap->flags |= FW_OK;
b8ff05a9
DM
4445 return 0;
4446
4447 /*
636f9d37
VP
4448 * Something bad happened. If a command timed out or failed with EIO,
4449 * the FW is not operating within its spec or something catastrophic
4450 * happened to the HW/FW, so stop issuing commands.
b8ff05a9 4451 */
636f9d37
VP
4452bye:
4453 if (ret != -ETIMEDOUT && ret != -EIO)
4454 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
4455 return ret;
4456}
4457
204dc3c0
DM
4458/* EEH callbacks */
4459
4460static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4461 pci_channel_state_t state)
4462{
4463 int i;
4464 struct adapter *adap = pci_get_drvdata(pdev);
4465
4466 if (!adap)
4467 goto out;
4468
4469 rtnl_lock();
4470 adap->flags &= ~FW_OK;
4471 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4472 for_each_port(adap, i) {
4473 struct net_device *dev = adap->port[i];
4474
4475 netif_device_detach(dev);
4476 netif_carrier_off(dev);
4477 }
4478 if (adap->flags & FULL_INIT_DONE)
4479 cxgb_down(adap);
4480 rtnl_unlock();
4481 pci_disable_device(pdev);
4482out: return state == pci_channel_io_perm_failure ?
4483 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4484}
4485
4486static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4487{
4488 int i, ret;
4489 struct fw_caps_config_cmd c;
4490 struct adapter *adap = pci_get_drvdata(pdev);
4491
4492 if (!adap) {
4493 pci_restore_state(pdev);
4494 pci_save_state(pdev);
4495 return PCI_ERS_RESULT_RECOVERED;
4496 }
4497
4498 if (pci_enable_device(pdev)) {
4499 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
4500 return PCI_ERS_RESULT_DISCONNECT;
4501 }
4502
4503 pci_set_master(pdev);
4504 pci_restore_state(pdev);
4505 pci_save_state(pdev);
4506 pci_cleanup_aer_uncorrect_error_status(pdev);
4507
4508 if (t4_wait_dev_ready(adap) < 0)
4509 return PCI_ERS_RESULT_DISCONNECT;
060e0c75 4510 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
204dc3c0
DM
4511 return PCI_ERS_RESULT_DISCONNECT;
4512 adap->flags |= FW_OK;
4513 if (adap_init1(adap, &c))
4514 return PCI_ERS_RESULT_DISCONNECT;
4515
4516 for_each_port(adap, i) {
4517 struct port_info *p = adap2pinfo(adap, i);
4518
060e0c75
DM
4519 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
4520 NULL, NULL);
204dc3c0
DM
4521 if (ret < 0)
4522 return PCI_ERS_RESULT_DISCONNECT;
4523 p->viid = ret;
4524 p->xact_addr_filt = -1;
4525 }
4526
4527 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4528 adap->params.b_wnd);
1ae970e0 4529 setup_memwin(adap);
204dc3c0
DM
4530 if (cxgb_up(adap))
4531 return PCI_ERS_RESULT_DISCONNECT;
4532 return PCI_ERS_RESULT_RECOVERED;
4533}
4534
4535static void eeh_resume(struct pci_dev *pdev)
4536{
4537 int i;
4538 struct adapter *adap = pci_get_drvdata(pdev);
4539
4540 if (!adap)
4541 return;
4542
4543 rtnl_lock();
4544 for_each_port(adap, i) {
4545 struct net_device *dev = adap->port[i];
4546
4547 if (netif_running(dev)) {
4548 link_start(dev);
4549 cxgb_set_rxmode(dev);
4550 }
4551 netif_device_attach(dev);
4552 }
4553 rtnl_unlock();
4554}
4555
3646f0e5 4556static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
4557 .error_detected = eeh_err_detected,
4558 .slot_reset = eeh_slot_reset,
4559 .resume = eeh_resume,
4560};
4561
b8ff05a9
DM
4562static inline bool is_10g_port(const struct link_config *lc)
4563{
4564 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
4565}
4566
4567static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
4568 unsigned int size, unsigned int iqe_size)
4569{
4570 q->intr_params = QINTR_TIMER_IDX(timer_idx) |
4571 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
4572 q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
4573 q->iqe_len = iqe_size;
4574 q->size = size;
4575}
4576
4577/*
4578 * Perform default configuration of DMA queues depending on the number and type
4579 * of ports we found and the number of available CPUs. Most settings can be
4580 * modified by the admin prior to actual use.
4581 */
91744948 4582static void cfg_queues(struct adapter *adap)
b8ff05a9
DM
4583{
4584 struct sge *s = &adap->sge;
4585 int i, q10g = 0, n10g = 0, qidx = 0;
4586
4587 for_each_port(adap, i)
4588 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
4589
4590 /*
4591 * We default to 1 queue per non-10G port and up to one queue per
4592 * CPU core for each 10G port.
4593 */
4594 if (n10g)
4595 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5952dde7
YM
4596 if (q10g > netif_get_num_default_rss_queues())
4597 q10g = netif_get_num_default_rss_queues();
b8ff05a9
DM
4598
4599 for_each_port(adap, i) {
4600 struct port_info *pi = adap2pinfo(adap, i);
4601
4602 pi->first_qset = qidx;
4603 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
4604 qidx += pi->nqsets;
4605 }
4606
4607 s->ethqsets = qidx;
4608 s->max_ethqsets = qidx; /* MSI-X may lower it later */
4609
4610 if (is_offload(adap)) {
4611 /*
4612 * For offload we use 1 queue/channel if all ports are up to 1G,
4613 * otherwise we divide all available queues amongst the channels
4614 * capped by the number of available cores.
4615 */
4616 if (n10g) {
4617 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
4618 num_online_cpus());
4619 s->ofldqsets = roundup(i, adap->params.nports);
4620 } else
4621 s->ofldqsets = adap->params.nports;
4622 /* For RDMA one Rx queue per channel suffices */
4623 s->rdmaqs = adap->params.nports;
4624 }
4625
4626 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4627 struct sge_eth_rxq *r = &s->ethrxq[i];
4628
4629 init_rspq(&r->rspq, 0, 0, 1024, 64);
4630 r->fl.size = 72;
4631 }
4632
4633 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4634 s->ethtxq[i].q.size = 1024;
4635
4636 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4637 s->ctrlq[i].q.size = 512;
4638
4639 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4640 s->ofldtxq[i].q.size = 1024;
4641
4642 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
4643 struct sge_ofld_rxq *r = &s->ofldrxq[i];
4644
4645 init_rspq(&r->rspq, 0, 0, 1024, 64);
4646 r->rspq.uld = CXGB4_ULD_ISCSI;
4647 r->fl.size = 72;
4648 }
4649
4650 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4651 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4652
4653 init_rspq(&r->rspq, 0, 0, 511, 64);
4654 r->rspq.uld = CXGB4_ULD_RDMA;
4655 r->fl.size = 72;
4656 }
4657
4658 init_rspq(&s->fw_evtq, 6, 0, 512, 64);
4659 init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
4660}
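
A worked example of the queue arithmetic above, with illustrative numbers:
on a two-port 10G adapter (n10g = 2, nports = 2) and assuming MAX_ETH_QSETS
is 32, q10g = (32 - 0) / 2 = 16; netif_get_num_default_rss_queues()
(typically 8 on a host with eight or more CPUs) then caps it at 8, so each
port is given 8 Ethernet queue sets and s->ethqsets ends up as 16.
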
4661
4662/*
4663 * Reduce the number of Ethernet queues across all ports to at most n.
4664 * n provides at least one queue per port.
4665 */
91744948 4666static void reduce_ethqs(struct adapter *adap, int n)
b8ff05a9
DM
4667{
4668 int i;
4669 struct port_info *pi;
4670
4671 while (n < adap->sge.ethqsets)
4672 for_each_port(adap, i) {
4673 pi = adap2pinfo(adap, i);
4674 if (pi->nqsets > 1) {
4675 pi->nqsets--;
4676 adap->sge.ethqsets--;
4677 if (adap->sge.ethqsets <= n)
4678 break;
4679 }
4680 }
4681
4682 n = 0;
4683 for_each_port(adap, i) {
4684 pi = adap2pinfo(adap, i);
4685 pi->first_qset = n;
4686 n += pi->nqsets;
4687 }
4688}
4689
4690/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4691#define EXTRA_VECS 2
4692
91744948 4693static int enable_msix(struct adapter *adap)
b8ff05a9
DM
4694{
4695 int ofld_need = 0;
4696 int i, err, want, need;
4697 struct sge *s = &adap->sge;
4698 unsigned int nchan = adap->params.nports;
4699 struct msix_entry entries[MAX_INGQ + 1];
4700
4701 for (i = 0; i < ARRAY_SIZE(entries); ++i)
4702 entries[i].entry = i;
4703
4704 want = s->max_ethqsets + EXTRA_VECS;
4705 if (is_offload(adap)) {
4706 want += s->rdmaqs + s->ofldqsets;
4707 /* need nchan for each possible ULD */
4708 ofld_need = 2 * nchan;
4709 }
4710 need = adap->params.nports + EXTRA_VECS + ofld_need;
4711
4712 while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
4713 want = err;
4714
4715 if (!err) {
4716 /*
4717 * Distribute available vectors to the various queue groups.
4718 * Every group gets its minimum requirement and NIC gets top
4719 * priority for leftovers.
4720 */
4721 i = want - EXTRA_VECS - ofld_need;
4722 if (i < s->max_ethqsets) {
4723 s->max_ethqsets = i;
4724 if (i < s->ethqsets)
4725 reduce_ethqs(adap, i);
4726 }
4727 if (is_offload(adap)) {
4728 i = want - EXTRA_VECS - s->max_ethqsets;
4729 i -= ofld_need - nchan;
4730 s->ofldqsets = (i / nchan) * nchan; /* round down */
4731 }
4732 for (i = 0; i < want; ++i)
4733 adap->msix_info[i].vec = entries[i].vector;
4734 } else if (err > 0)
4735 dev_info(adap->pdev_dev,
4736 "only %d MSI-X vectors left, not using MSI-X\n", err);
4737 return err;
4738}
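
The while loop above shrinks the request because the legacy
pci_enable_msix() returns the number of vectors it could have allocated
when it cannot satisfy 'want'. On kernels that provide
pci_enable_msix_range() (a later API than this code uses), the same
allocate-or-shrink behaviour collapses into one call; a sketch under that
assumption:

        static int enable_msix_sketch(struct adapter *adap,
                                      struct msix_entry *entries,
                                      int need, int want)
        {
                int i, allocated;

                allocated = pci_enable_msix_range(adap->pdev, entries,
                                                  need, want);
                if (allocated < 0)
                        return allocated;  /* minimum could not be met */

                for (i = 0; i < allocated; i++)
                        adap->msix_info[i].vec = entries[i].vector;
                return allocated;
        }
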
4739
4740#undef EXTRA_VECS
4741
91744948 4742static int init_rss(struct adapter *adap)
671b0060
DM
4743{
4744 unsigned int i, j;
4745
4746 for_each_port(adap, i) {
4747 struct port_info *pi = adap2pinfo(adap, i);
4748
4749 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4750 if (!pi->rss)
4751 return -ENOMEM;
4752 for (j = 0; j < pi->rss_size; j++)
278bc429 4753 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
671b0060
DM
4754 }
4755 return 0;
4756}
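
For reference, ethtool_rxfh_indir_default(j, n) is assumed to reduce to a
plain round-robin spread (roughly j % n), so the loop above amounts to:

        for (j = 0; j < pi->rss_size; j++)
                pi->rss[j] = j % pi->nqsets;    /* cycle across queue sets */
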
4757
91744948 4758static void print_port_info(const struct net_device *dev)
b8ff05a9
DM
4759{
4760 static const char *base[] = {
a0881cab 4761 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
7d5e77aa 4762 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
b8ff05a9
DM
4763 };
4764
b8ff05a9 4765 char buf[80];
118969ed 4766 char *bufp = buf;
f1a051b9 4767 const char *spd = "";
118969ed
DM
4768 const struct port_info *pi = netdev_priv(dev);
4769 const struct adapter *adap = pi->adapter;
f1a051b9
DM
4770
4771 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
4772 spd = " 2.5 GT/s";
4773 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
4774 spd = " 5 GT/s";
b8ff05a9 4775
118969ed
DM
4776 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4777 bufp += sprintf(bufp, "100/");
4778 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4779 bufp += sprintf(bufp, "1000/");
4780 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4781 bufp += sprintf(bufp, "10G/");
4782 if (bufp != buf)
4783 --bufp;
4784 sprintf(bufp, "BASE-%s", base[pi->port_type]);
4785
4786 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
4787 adap->params.vpd.id, adap->params.rev, buf,
4788 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
4789 (adap->flags & USING_MSIX) ? " MSI-X" :
4790 (adap->flags & USING_MSI) ? " MSI" : "");
4791 netdev_info(dev, "S/N: %s, E/C: %s\n",
4792 adap->params.vpd.sn, adap->params.vpd.ec);
b8ff05a9
DM
4793}
4794
91744948 4795static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
ef306b50 4796{
e5c8ae5f 4797 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
ef306b50
DM
4798}
4799
06546391
DM
4800/*
4801 * Free the following resources:
4802 * - memory used for tables
4803 * - MSI/MSI-X
4804 * - net devices
4805 * - resources FW is holding for us
4806 */
4807static void free_some_resources(struct adapter *adapter)
4808{
4809 unsigned int i;
4810
4811 t4_free_mem(adapter->l2t);
4812 t4_free_mem(adapter->tids.tid_tab);
4813 disable_msi(adapter);
4814
4815 for_each_port(adapter, i)
671b0060
DM
4816 if (adapter->port[i]) {
4817 kfree(adap2pinfo(adapter, i)->rss);
06546391 4818 free_netdev(adapter->port[i]);
671b0060 4819 }
06546391 4820 if (adapter->flags & FW_OK)
060e0c75 4821 t4_fw_bye(adapter, adapter->fn);
06546391
DM
4822}
4823
2ed28baa 4824#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 4825#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9
DM
4826 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4827
1dd06ae8 4828static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
b8ff05a9
DM
4829{
4830 int func, i, err;
4831 struct port_info *pi;
c8f44aff 4832 bool highdma = false;
b8ff05a9
DM
4833 struct adapter *adapter = NULL;
4834
4835 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
4836
4837 err = pci_request_regions(pdev, KBUILD_MODNAME);
4838 if (err) {
4839 /* Just info, some other driver may have claimed the device. */
4840 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
4841 return err;
4842 }
4843
060e0c75 4844 /* We control everything through one PF */
b8ff05a9 4845 func = PCI_FUNC(pdev->devfn);
060e0c75 4846 if (func != ent->driver_data) {
204dc3c0 4847 pci_save_state(pdev); /* to restore SR-IOV later */
b8ff05a9 4848 goto sriov;
204dc3c0 4849 }
b8ff05a9
DM
4850
4851 err = pci_enable_device(pdev);
4852 if (err) {
4853 dev_err(&pdev->dev, "cannot enable PCI device\n");
4854 goto out_release_regions;
4855 }
4856
4857 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 4858 highdma = true;
b8ff05a9
DM
4859 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4860 if (err) {
4861 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
4862 "coherent allocations\n");
4863 goto out_disable_device;
4864 }
4865 } else {
4866 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4867 if (err) {
4868 dev_err(&pdev->dev, "no usable DMA configuration\n");
4869 goto out_disable_device;
4870 }
4871 }
4872
4873 pci_enable_pcie_error_reporting(pdev);
ef306b50 4874 enable_pcie_relaxed_ordering(pdev);
b8ff05a9
DM
4875 pci_set_master(pdev);
4876 pci_save_state(pdev);
4877
4878 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4879 if (!adapter) {
4880 err = -ENOMEM;
4881 goto out_disable_device;
4882 }
4883
4884 adapter->regs = pci_ioremap_bar(pdev, 0);
4885 if (!adapter->regs) {
4886 dev_err(&pdev->dev, "cannot map device registers\n");
4887 err = -ENOMEM;
4888 goto out_free_adapter;
4889 }
4890
4891 adapter->pdev = pdev;
4892 adapter->pdev_dev = &pdev->dev;
3069ee9b 4893 adapter->mbox = func;
060e0c75 4894 adapter->fn = func;
b8ff05a9
DM
4895 adapter->msg_enable = dflt_msg_enable;
4896 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4897
4898 spin_lock_init(&adapter->stats_lock);
4899 spin_lock_init(&adapter->tid_release_lock);
4900
4901 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
4902 INIT_WORK(&adapter->db_full_task, process_db_full);
4903 INIT_WORK(&adapter->db_drop_task, process_db_drop);
b8ff05a9
DM
4904
4905 err = t4_prep_adapter(adapter);
4906 if (err)
4907 goto out_unmap_bar;
636f9d37 4908 setup_memwin(adapter);
b8ff05a9 4909 err = adap_init0(adapter);
636f9d37 4910 setup_memwin_rdma(adapter);
b8ff05a9
DM
4911 if (err)
4912 goto out_unmap_bar;
4913
4914 for_each_port(adapter, i) {
4915 struct net_device *netdev;
4916
4917 netdev = alloc_etherdev_mq(sizeof(struct port_info),
4918 MAX_ETH_QSETS);
4919 if (!netdev) {
4920 err = -ENOMEM;
4921 goto out_free_dev;
4922 }
4923
4924 SET_NETDEV_DEV(netdev, &pdev->dev);
4925
4926 adapter->port[i] = netdev;
4927 pi = netdev_priv(netdev);
4928 pi->adapter = adapter;
4929 pi->xact_addr_filt = -1;
b8ff05a9 4930 pi->port_id = i;
b8ff05a9
DM
4931 netdev->irq = pdev->irq;
4932
2ed28baa
MM
4933 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
4934 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4935 NETIF_F_RXCSUM | NETIF_F_RXHASH |
4936 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
c8f44aff
MM
4937 if (highdma)
4938 netdev->hw_features |= NETIF_F_HIGHDMA;
4939 netdev->features |= netdev->hw_features;
b8ff05a9
DM
4940 netdev->vlan_features = netdev->features & VLAN_FEAT;
4941
01789349
JP
4942 netdev->priv_flags |= IFF_UNICAST_FLT;
4943
b8ff05a9
DM
4944 netdev->netdev_ops = &cxgb4_netdev_ops;
4945 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
4946 }
4947
4948 pci_set_drvdata(pdev, adapter);
4949
4950 if (adapter->flags & FW_OK) {
060e0c75 4951 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
4952 if (err)
4953 goto out_free_dev;
4954 }
4955
4956 /*
4957 * Configure queues and allocate tables now; they can be needed as
4958 * soon as the first register_netdev completes.
4959 */
4960 cfg_queues(adapter);
4961
4962 adapter->l2t = t4_init_l2t();
4963 if (!adapter->l2t) {
4964 /* We tolerate a lack of L2T, giving up some functionality */
4965 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
4966 adapter->params.offload = 0;
4967 }
4968
4969 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
4970 dev_warn(&pdev->dev, "could not allocate TID table, "
4971 "continuing\n");
4972 adapter->params.offload = 0;
4973 }
4974
f7cabcdd
DM
4975 /* See what interrupts we'll be using */
4976 if (msi > 1 && enable_msix(adapter) == 0)
4977 adapter->flags |= USING_MSIX;
4978 else if (msi > 0 && pci_enable_msi(pdev) == 0)
4979 adapter->flags |= USING_MSI;
4980
671b0060
DM
4981 err = init_rss(adapter);
4982 if (err)
4983 goto out_free_dev;
4984
b8ff05a9
DM
4985 /*
4986 * The card is now ready to go. If any errors occur during device
4987 * registration we do not fail the whole card but rather proceed only
4988 * with the ports we manage to register successfully. However we must
4989 * register at least one net device.
4990 */
4991 for_each_port(adapter, i) {
a57cabe0
DM
4992 pi = adap2pinfo(adapter, i);
4993 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
4994 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
4995
b8ff05a9
DM
4996 err = register_netdev(adapter->port[i]);
4997 if (err)
b1a3c2b6 4998 break;
b1a3c2b6
DM
4999 adapter->chan_map[pi->tx_chan] = i;
5000 print_port_info(adapter->port[i]);
b8ff05a9 5001 }
b1a3c2b6 5002 if (i == 0) {
b8ff05a9
DM
5003 dev_err(&pdev->dev, "could not register any net devices\n");
5004 goto out_free_dev;
5005 }
b1a3c2b6
DM
5006 if (err) {
5007 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5008 err = 0;
6403eab1 5009 }
b8ff05a9
DM
5010
5011 if (cxgb4_debugfs_root) {
5012 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5013 cxgb4_debugfs_root);
5014 setup_debugfs(adapter);
5015 }
5016
6482aa7c
DLR
5017 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5018 pdev->needs_freset = 1;
5019
b8ff05a9
DM
5020 if (is_offload(adapter))
5021 attach_ulds(adapter);
5022
b8ff05a9
DM
5023sriov:
5024#ifdef CONFIG_PCI_IOV
5025 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
5026 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
5027 dev_info(&pdev->dev,
5028 "instantiated %u virtual functions\n",
5029 num_vf[func]);
5030#endif
5031 return 0;
5032
5033 out_free_dev:
06546391 5034 free_some_resources(adapter);
b8ff05a9
DM
5035 out_unmap_bar:
5036 iounmap(adapter->regs);
5037 out_free_adapter:
5038 kfree(adapter);
5039 out_disable_device:
5040 pci_disable_pcie_error_reporting(pdev);
5041 pci_disable_device(pdev);
5042 out_release_regions:
5043 pci_release_regions(pdev);
5044 pci_set_drvdata(pdev, NULL);
5045 return err;
5046}
5047
91744948 5048static void remove_one(struct pci_dev *pdev)
b8ff05a9
DM
5049{
5050 struct adapter *adapter = pci_get_drvdata(pdev);
5051
636f9d37 5052#ifdef CONFIG_PCI_IOV
b8ff05a9
DM
5053 pci_disable_sriov(pdev);
5054
636f9d37
VP
5055#endif
5056
b8ff05a9
DM
5057 if (adapter) {
5058 int i;
5059
5060 if (is_offload(adapter))
5061 detach_ulds(adapter);
5062
5063 for_each_port(adapter, i)
8f3a7676 5064 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
b8ff05a9
DM
5065 unregister_netdev(adapter->port[i]);
5066
5067 if (adapter->debugfs_root)
5068 debugfs_remove_recursive(adapter->debugfs_root);
5069
f2b7e78d
VP
5070 /* If we allocated filters, free up state associated with any
5071 * valid filters ...
5072 */
5073 if (adapter->tids.ftid_tab) {
5074 struct filter_entry *f = &adapter->tids.ftid_tab[0];
dca4faeb
VP
5075 for (i = 0; i < (adapter->tids.nftids +
5076 adapter->tids.nsftids); i++, f++)
f2b7e78d
VP
5077 if (f->valid)
5078 clear_filter(adapter, f);
5079 }
5080
aaefae9b
DM
5081 if (adapter->flags & FULL_INIT_DONE)
5082 cxgb_down(adapter);
b8ff05a9 5083
06546391 5084 free_some_resources(adapter);
b8ff05a9
DM
5085 iounmap(adapter->regs);
5086 kfree(adapter);
5087 pci_disable_pcie_error_reporting(pdev);
5088 pci_disable_device(pdev);
5089 pci_release_regions(pdev);
5090 pci_set_drvdata(pdev, NULL);
a069ec91 5091 } else
b8ff05a9
DM
5092 pci_release_regions(pdev);
5093}
5094
5095static struct pci_driver cxgb4_driver = {
5096 .name = KBUILD_MODNAME,
5097 .id_table = cxgb4_pci_tbl,
5098 .probe = init_one,
91744948 5099 .remove = remove_one,
204dc3c0 5100 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
5101};
5102
5103static int __init cxgb4_init_module(void)
5104{
5105 int ret;
5106
3069ee9b
VP
5107 workq = create_singlethread_workqueue("cxgb4");
5108 if (!workq)
5109 return -ENOMEM;
5110
b8ff05a9
DM
5111 /* Debugfs support is optional, just warn if this fails */
5112 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
5113 if (!cxgb4_debugfs_root)
5114 pr_warning("could not create debugfs entry, continuing\n");
5115
5116 ret = pci_register_driver(&cxgb4_driver);
5117 if (ret < 0)
5118 debugfs_remove(cxgb4_debugfs_root);
5119 return ret;
5120}
5121
5122static void __exit cxgb4_cleanup_module(void)
5123{
5124 pci_unregister_driver(&cxgb4_driver);
5125 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
3069ee9b
VP
5126 flush_workqueue(workq);
5127 destroy_workqueue(workq);
b8ff05a9
DM
5128}
5129
5130module_init(cxgb4_init_module);
5131module_exit(cxgb4_cleanup_module);