cxgb4: large receive offload support
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
        {PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { 0, } \
        }

#include "t4_pci_id_tbl.h"
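
/* When t4_pci_id_tbl.h expands the macros above, the result is an ordinary
 * PCI device table, roughly of this shape (the device ID shown is purely
 * illustrative):
 *
 *      static const struct pci_device_id cxgb4_pci_tbl[] = {
 *              {PCI_VDEVICE(CHELSIO, 0x4001), 4},
 *              ...
 *              { 0, }
 *      };
 */
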
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
                 "deprecated parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
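
/* For example, loading the driver with "modprobe cxgb4 msi=1" keeps it off
 * MSI-X on platforms where MSI-X support is known to misbehave.
 */
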
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
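
/* The arithmetic: with a 2-byte pad the 14-byte Ethernet header ends on a
 * 16-byte boundary, so the IP header that follows starts 4-byte aligned.
 */
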
#ifdef CONFIG_PCI_IOV
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue() function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s;
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 10000:
                        s = "10Gbps";
                        break;
                case 1000:
                        s = "1000Mbps";
                        break;
                case 100:
                        s = "100Mbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                default:
                        pr_info("%s: unsupported speed: %d\n",
                                dev->name, p->link_cfg.speed);
                        return;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X_V(
                                FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
                                            &name, &value,
                                            -FW_CMD_MAX_TIMEOUT);

                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        cxgb4_dcb_state_init(dev);
                        dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->pf;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->pf;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);
                local_bh_enable();
        }

        return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);

        if (!pi->dcb.enabled)
                return 0;

        return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
                (pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
        return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[port];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter have loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
            (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = TCB_COOKIE_G(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
                     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_G(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev = q->adap->port[port];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS_F)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
        if (ulds[q->uld].lro_flush)
                ulds[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
        int ret;

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (q->flush_handler)
                ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
                                                  rsp, gl, &q->lro_mgr,
                                                  &q->napi);
        else
                ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
                                              rsp, gl);

        if (ret) {
                rxq->stats.nomem++;
                return -1;
        }

        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;
        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

        if (v & PFSW_F) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
        }
        if (adap->flags & MASTER_PF)
                t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

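/* MSI-X vector layout used by the functions below: entry 0 is the non-data
 * interrupt above, entry 1 the firmware event queue, and entries 2 and up
 * the per-queue interrupts, in the order the queues are named and requested.
 */
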
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_iscsirxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
                         adap->port[0]->name, i);

        for_each_iscsitrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);

        for_each_rdmaciq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
                         adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
        int iscsitqidx = 0;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_iscsirxq(s, iscsiqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->iscsirxq[iscsiqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_iscsitrxq(s, iscsitqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->iscsitrxq[iscsitqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmaciq(s, rdmaciqqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmaciq[rdmaciqqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaciqqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmaciq[rdmaciqqidx].rspq);
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--iscsitqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->iscsitrxq[iscsitqidx].rspq);
        while (--iscsiqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->iscsirxq[iscsiqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_iscsirxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec,
                         &s->iscsirxq[i].rspq);
        for_each_iscsitrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec,
                         &s->iscsitrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
        for_each_rdmaciq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *      cxgb4_write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 *      Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;

        err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        /* If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed.  We'll
         * use our first ingress queue ...
         */
        if (!err)
                err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
                                       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
                                       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
                                       rss[0]);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, j, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                /* Fill default values with equal distribution */
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = j % pi->nqsets;

                err = cxgb4_write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}
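
/* For example, a port with a 64-entry RSS table and 8 queue sets gets the
 * default mapping 0,1,...,7,0,1,...  which cxgb4_write_rss() then translates
 * into absolute ingress queue IDs.
 */
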
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler) {
                        napi_disable(&q->napi);
                        local_bh_disable();
                        while (!cxgb_poll_lock_napi(q))
                                mdelay(1);
                        local_bh_enable();
                }
        }
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
        if (adap->flags & FULL_INIT_DONE) {
                t4_intr_disable(adap);
                if (adap->flags & USING_MSIX) {
                        free_msix_queue_irqs(adap);
                        free_irq(adap->msix_info[0].vec, adap);
                } else {
                        free_irq(adap->pdev->irq, adap);
                }
                quiesce_rx(adap);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler) {
                        cxgb_busy_poll_init_lock(q);
                        napi_enable(&q->napi);
                }
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                             SEINTARM_V(q->intr_params) |
                             INGRESSQID_V(q->cntxt_id));
        }
}

static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
                           unsigned int nq, unsigned int per_chan, int msi_idx,
                           u16 *ids, bool lro)
{
        int i, err;

        for (i = 0; i < nq; i++, q++) {
                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
                                       adap->port[i / per_chan],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler,
                                       lro ? uldrx_flush_handler : NULL,
                                       0);
                if (err)
                        return err;
                memset(&q->stats, 0, sizeof(q->stats));
                if (ids)
                        ids[i] = q->rspq.abs_id;
        }
        return 0;
}
1021
b8ff05a9
DM
1022/**
1023 * setup_sge_queues - configure SGE Tx/Rx/response queues
1024 * @adap: the adapter
1025 *
1026 * Determines how many sets of SGE queues to use and initializes them.
1027 * We support multiple queue sets per port if we have MSI-X, otherwise
1028 * just one queue set per port.
1029 */
1030static int setup_sge_queues(struct adapter *adap)
1031{
1032 int err, msi_idx, i, j;
1033 struct sge *s = &adap->sge;
1034
4b8e27a8
HS
1035 bitmap_zero(s->starving_fl, s->egr_sz);
1036 bitmap_zero(s->txq_maperr, s->egr_sz);
b8ff05a9
DM
1037
1038 if (adap->flags & USING_MSIX)
1039 msi_idx = 1; /* vector 0 is for non-queue interrupts */
1040 else {
1041 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
2337ba42 1042 NULL, NULL, NULL, -1);
b8ff05a9
DM
1043 if (err)
1044 return err;
1045 msi_idx = -((int)s->intrq.abs_id + 1);
1046 }
1047
4b8e27a8
HS
1048 /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
1049 * don't forget to update the following which need to be
1050 * synchronized to and changes here.
1051 *
1052 * 1. The calculations of MAX_INGQ in cxgb4.h.
1053 *
1054 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
1055 * to accommodate any new/deleted Ingress Queues
1056 * which need MSI-X Vectors.
1057 *
1058 * 3. Update sge_qinfo_show() to include information on the
1059 * new/deleted queues.
1060 */
b8ff05a9 1061 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
2337ba42 1062 msi_idx, NULL, fwevtq_handler, NULL, -1);
b8ff05a9
DM
1063 if (err) {
1064freeout: t4_free_sge_resources(adap);
1065 return err;
1066 }
1067
1068 for_each_port(adap, i) {
1069 struct net_device *dev = adap->port[i];
1070 struct port_info *pi = netdev_priv(dev);
1071 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1072 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1073
1074 for (j = 0; j < pi->nqsets; j++, q++) {
1075 if (msi_idx > 0)
1076 msi_idx++;
1077 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1078 msi_idx, &q->fl,
145ef8a5 1079 t4_ethrx_handler,
2337ba42 1080 NULL,
145ef8a5
HS
1081 t4_get_mps_bg_map(adap,
1082 pi->tx_chan));
b8ff05a9
DM
1083 if (err)
1084 goto freeout;
1085 q->rspq.idx = j;
1086 memset(&q->stats, 0, sizeof(q->stats));
1087 }
1088 for (j = 0; j < pi->nqsets; j++, t++) {
1089 err = t4_sge_alloc_eth_txq(adap, t, dev,
1090 netdev_get_tx_queue(dev, j),
1091 s->fw_evtq.cntxt_id);
1092 if (err)
1093 goto freeout;
1094 }
1095 }
1096
f90ce561
HS
1097 j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
1098 for_each_iscsirxq(s, i) {
1c6a5b0e
HS
1099 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
1100 adap->port[i / j],
b8ff05a9
DM
1101 s->fw_evtq.cntxt_id);
1102 if (err)
1103 goto freeout;
1104 }
1105
2337ba42
VP
1106#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \
1107 err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \
1c6a5b0e
HS
1108 if (err) \
1109 goto freeout; \
1110 if (msi_idx > 0) \
1111 msi_idx += nq; \
1112} while (0)
b8ff05a9 1113
2337ba42
VP
1114 ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false);
1115 ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq, true);
1116 ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq, false);
f36e58e5 1117 j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
2337ba42 1118 ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq, false);
b8ff05a9 1119
1c6a5b0e 1120#undef ALLOC_OFLD_RXQS
cf38be6d 1121
b8ff05a9
DM
1122 for_each_port(adap, i) {
1123 /*
1124 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1125 * have RDMA queues, and that's the right value.
1126 */
1127 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1128 s->fw_evtq.cntxt_id,
1129 s->rdmarxq[i].rspq.cntxt_id);
1130 if (err)
1131 goto freeout;
1132 }
1133
9bb59b96 1134 t4_write_reg(adap, is_t4(adap->params.chip) ?
837e4a42
HS
1135 MPS_TRC_RSS_CONTROL_A :
1136 MPS_T5_TRC_RSS_CONTROL_A,
1137 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1138 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
b8ff05a9
DM
1139 return 0;
1140}
1141
b8ff05a9
DM
1142/*
1143 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1144 * The allocated memory is cleared.
1145 */
1146void *t4_alloc_mem(size_t size)
1147{
8be04b93 1148 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
b8ff05a9
DM
1149
1150 if (!p)
89bf67f1 1151 p = vzalloc(size);
b8ff05a9
DM
1152 return p;
1153}
1154
1155/*
1156 * Free memory allocated through alloc_mem().
1157 */
fd88b31a 1158void t4_free_mem(void *addr)
b8ff05a9 1159{
d2fcb548 1160 kvfree(addr);
b8ff05a9
DM
1161}
1162
f2b7e78d
VP
1163/* Send a Work Request to write the filter at a specified index. We construct
1164 * a Firmware Filter Work Request to have the work done and put the indicated
1165 * filter into "pending" mode which will prevent any further actions against
1166 * it till we get a reply from the firmware on the completion status of the
1167 * request.
1168 */
1169static int set_filter_wr(struct adapter *adapter, int fidx)
1170{
1171 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1172 struct sk_buff *skb;
1173 struct fw_filter_wr *fwr;
1174 unsigned int ftid;
1175
f72f116a
MH
1176 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
1177 if (!skb)
1178 return -ENOMEM;
1179
f2b7e78d
VP
1180 /* If the new filter requires loopback Destination MAC and/or VLAN
1181 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1182 * the filter.
1183 */
1184 if (f->fs.newdmac || f->fs.newvlan) {
1185 /* allocate L2T entry for new filter */
f7502659
HS
1186 f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
1187 f->fs.eport, f->fs.dmac);
f72f116a 1188 if (f->l2t == NULL) {
f72f116a 1189 kfree_skb(skb);
f2b7e78d
VP
1190 return -ENOMEM;
1191 }
1192 }
1193
1194 ftid = adapter->tids.ftid_base + fidx;
1195
f2b7e78d
VP
1196 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1197 memset(fwr, 0, sizeof(*fwr));
1198
1199 /* It would be nice to put most of the following in t4_hw.c but most
1200 * of the work is translating the cxgbtool ch_filter_specification
1201 * into the Work Request and the definition of that structure is
1202 * currently in cxgbtool.h which isn't appropriate to pull into the
1203 * common code. We may eventually try to come up with a more neutral
1204 * filter specification structure but for now it's easiest to simply
1205 * put this fairly direct code in line ...
1206 */
e2ac9628
HS
1207 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
1208 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
f2b7e78d 1209 fwr->tid_to_iq =
77a80e23
HS
1210 htonl(FW_FILTER_WR_TID_V(ftid) |
1211 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
1212 FW_FILTER_WR_NOREPLY_V(0) |
1213 FW_FILTER_WR_IQ_V(f->fs.iq));
f2b7e78d 1214 fwr->del_filter_to_l2tix =
77a80e23
HS
1215 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
1216 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
1217 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
1218 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
1219 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
1220 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
1221 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
1222 FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
1223 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f2b7e78d 1224 f->fs.newvlan == VLAN_REWRITE) |
77a80e23 1225 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
f2b7e78d 1226 f->fs.newvlan == VLAN_REWRITE) |
77a80e23
HS
1227 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
1228 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
1229 FW_FILTER_WR_PRIO_V(f->fs.prio) |
1230 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
f2b7e78d
VP
1231 fwr->ethtype = htons(f->fs.val.ethtype);
1232 fwr->ethtypem = htons(f->fs.mask.ethtype);
1233 fwr->frag_to_ovlan_vldm =
77a80e23
HS
1234 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
1235 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
1236 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
1237 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
1238 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
1239 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
f2b7e78d
VP
1240 fwr->smac_sel = 0;
1241 fwr->rx_chan_rx_rpl_iq =
77a80e23
HS
1242 htons(FW_FILTER_WR_RX_CHAN_V(0) |
1243 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
f2b7e78d 1244 fwr->maci_to_matchtypem =
77a80e23
HS
1245 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
1246 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
1247 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
1248 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
1249 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
1250 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
1251 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
1252 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
f2b7e78d
VP
1253 fwr->ptcl = f->fs.val.proto;
1254 fwr->ptclm = f->fs.mask.proto;
1255 fwr->ttyp = f->fs.val.tos;
1256 fwr->ttypm = f->fs.mask.tos;
1257 fwr->ivlan = htons(f->fs.val.ivlan);
1258 fwr->ivlanm = htons(f->fs.mask.ivlan);
1259 fwr->ovlan = htons(f->fs.val.ovlan);
1260 fwr->ovlanm = htons(f->fs.mask.ovlan);
1261 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1262 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1263 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1264 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1265 fwr->lp = htons(f->fs.val.lport);
1266 fwr->lpm = htons(f->fs.mask.lport);
1267 fwr->fp = htons(f->fs.val.fport);
1268 fwr->fpm = htons(f->fs.mask.fport);
1269 if (f->fs.newsmac)
1270 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1271
1272 /* Mark the filter as "pending" and ship off the Filter Work Request.
1273 * When we get the Work Request Reply we'll clear the pending status.
1274 */
1275 f->pending = 1;
1276 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1277 t4_ofld_send(adapter, skb);
1278 return 0;
1279}
1280
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
        int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
        /* If Data Center Bridging has been successfully negotiated on this
         * link then we'll use the skb's priority to map it to a TX Queue.
         * The skb's priority is determined via the VLAN Tag Priority Code
         * Point field.
         */
        if (cxgb4_dcb_enabled(dev)) {
                u16 vlan_tci;
                int err;

                err = vlan_get_tag(skb, &vlan_tci);
                if (unlikely(err)) {
                        if (net_ratelimit())
                                netdev_warn(dev,
                                            "TX Packet without VLAN Tag on DCB Link\n");
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
                        if (skb->protocol == htons(ETH_P_FCOE))
                                txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
                }
                return txq;
        }
#endif /* CONFIG_CHELSIO_T4_DCB */

        if (select_queue) {
                txq = (skb_rx_queue_recorded(skb)
                        ? skb_get_rx_queue(skb)
                        : smp_processor_id());

                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;

                return txq;
        }

        return fallback(dev, skb) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}
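
/* Both helpers pick the index whose configured value is nearest the request:
 * e.g. if timer_val were {1, 5, 10, 50, 100, 200} (illustrative values only),
 * a requested hold-off of 8 us would map to index 2, i.e. 10 us.
 */
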
/**
 *      cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *      @q: the Rx queue
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *      one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
                               unsigned int us, unsigned int cnt)
{
        struct adapter *adap = q->adap;

        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X_V(
                                        FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
                        err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
                                            &v, &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
        return 0;
}
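
/* E.g. cxgb4_set_rspq_intr_params(q, 5, 8) asks for an interrupt after
 * roughly 5 us or 8 packets, whichever threshold the hardware hits first.
 */
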
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
        const struct port_info *pi = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
        int err;

        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
                return 0;

        err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
                            -1, -1, -1,
                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (unlikely(err))
                dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
        return err;
}

static int setup_debugfs(struct adapter *adap)
{
        if (IS_ERR_OR_NULL(adap->debugfs_root))
                return -1;

#ifdef CONFIG_DEBUG_FS
        t4_setup_debugfs(adap);
#endif
        return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
        int atid = -1;

        spin_lock_bh(&t->atid_lock);
        if (t->afree) {
                union aopen_entry *p = t->afree;

                atid = (p - t->atid_tab) + t->atid_base;
                t->afree = p->next;
                p->data = data;
                t->atids_in_use++;
        }
        spin_unlock_bh(&t->atid_lock);
        return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
        union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

        spin_lock_bh(&t->atid_lock);
        p->next = t->afree;
        t->afree = p;
        t->atids_in_use--;
        spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
        int stid;

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET) {
                stid = find_first_zero_bit(t->stid_bmap, t->nstids);
                if (stid < t->nstids)
                        __set_bit(stid, t->stid_bmap);
                else
                        stid = -1;
        } else {
                stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
                if (stid < 0)
                        stid = -1;
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid += t->stid_base;
                /* IPv6 requires max of 520 bits or 16 cells in TCAM
                 * This is equivalent to 4 TIDs. With CLIP enabled it
                 * needs 2 TIDs.
                 */
                if (family == PF_INET)
                        t->stids_in_use++;
                else
                        t->stids_in_use += 2;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
        int stid;

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET) {
                stid = find_next_zero_bit(t->stid_bmap,
                                          t->nstids + t->nsftids, t->nstids);
                if (stid < (t->nstids + t->nsftids))
                        __set_bit(stid, t->stid_bmap);
                else
                        stid = -1;
        } else {
                stid = -1;
        }
        if (stid >= 0) {
                t->stid_tab[stid].data = data;
                stid -= t->nstids;
                stid += t->sftid_base;
                t->sftids_in_use++;
        }
        spin_unlock_bh(&t->stid_lock);
        return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
        /* Is it a server filter TID? */
        if (t->nsftids && (stid >= t->sftid_base)) {
                stid -= t->sftid_base;
                stid += t->nstids;
        } else {
                stid -= t->stid_base;
        }

        spin_lock_bh(&t->stid_lock);
        if (family == PF_INET)
                __clear_bit(stid, t->stid_bmap);
        else
                bitmap_release_region(t->stid_bmap, stid, 1);
        t->stid_tab[stid].data = NULL;
        if (stid < t->nstids) {
                if (family == PF_INET)
                        t->stids_in_use--;
                else
                        t->stids_in_use -= 2;
        } else {
                t->sftids_in_use--;
        }
        spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
                           unsigned int tid)
{
        struct cpl_tid_release *req;

        set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
        req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
                                    unsigned int tid)
{
        void **p = &t->tid_tab[tid];
        struct adapter *adap = container_of(t, struct adapter, tids);

        spin_lock_bh(&adap->tid_release_lock);
        *p = adap->tid_release_head;
        /* Low 2 bits encode the Tx channel number */
        adap->tid_release_head = (void **)((uintptr_t)p | chan);
        if (!adap->tid_release_task_busy) {
                adap->tid_release_task_busy = true;
                queue_work(adap->workq, &adap->tid_release_task);
        }
        spin_unlock_bh(&adap->tid_release_lock);
}
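
/* Stashing the channel in the low bits of the list pointer is safe because
 * tid_tab entries are pointers and therefore at least 4-byte aligned,
 * leaving the bottom two bits free; process_tid_release_list() masks them
 * back off with "& 3" below.
 */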

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
        struct sk_buff *skb;
        struct adapter *adap;

        adap = container_of(work, struct adapter, tid_release_task);

        spin_lock_bh(&adap->tid_release_lock);
        while (adap->tid_release_head) {
                void **p = adap->tid_release_head;
                unsigned int chan = (uintptr_t)p & 3;
                p = (void *)p - chan;

                adap->tid_release_head = *p;
                *p = NULL;
                spin_unlock_bh(&adap->tid_release_lock);

                while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
                                         GFP_KERNEL)))
                        schedule_timeout_uninterruptible(1);

                mk_tid_release(skb, chan, p - adap->tids.tid_tab);
                t4_ofld_send(adap, skb);
                spin_lock_bh(&adap->tid_release_lock);
        }
        adap->tid_release_task_busy = false;
        spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
        struct sk_buff *skb;
        struct adapter *adap = container_of(t, struct adapter, tids);

        WARN_ON(tid >= t->ntids);

        if (t->tid_tab[tid]) {
                t->tid_tab[tid] = NULL;
                if (t->hash_base && (tid >= t->hash_base))
                        atomic_dec(&t->hash_tids_in_use);
                else
                        atomic_dec(&t->tids_in_use);
        }

        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
        if (likely(skb)) {
                mk_tid_release(skb, chan, tid);
                t4_ofld_send(adap, skb);
        } else
                cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

1684/*
1685 * Allocate and initialize the TID tables. Returns 0 on success.
1686 */
1687static int tid_init(struct tid_info *t)
1688{
1689 size_t size;
f2b7e78d 1690 unsigned int stid_bmap_size;
b8ff05a9 1691 unsigned int natids = t->natids;
b6f8eaec 1692 struct adapter *adap = container_of(t, struct adapter, tids);
b8ff05a9 1693
dca4faeb 1694 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
f2b7e78d
VP
1695 size = t->ntids * sizeof(*t->tid_tab) +
1696 natids * sizeof(*t->atid_tab) +
b8ff05a9 1697 t->nstids * sizeof(*t->stid_tab) +
dca4faeb 1698 t->nsftids * sizeof(*t->stid_tab) +
f2b7e78d 1699 stid_bmap_size * sizeof(long) +
dca4faeb
VP
1700 t->nftids * sizeof(*t->ftid_tab) +
1701 t->nsftids * sizeof(*t->ftid_tab);
f2b7e78d 1702
b8ff05a9
DM
1703 t->tid_tab = t4_alloc_mem(size);
1704 if (!t->tid_tab)
1705 return -ENOMEM;
1706
1707 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1708 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
dca4faeb 1709 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
f2b7e78d 1710 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
b8ff05a9
DM
1711 spin_lock_init(&t->stid_lock);
1712 spin_lock_init(&t->atid_lock);
1713
1714 t->stids_in_use = 0;
2248b293 1715 t->sftids_in_use = 0;
b8ff05a9
DM
1716 t->afree = NULL;
1717 t->atids_in_use = 0;
1718 atomic_set(&t->tids_in_use, 0);
9a1bb9f6 1719 atomic_set(&t->hash_tids_in_use, 0);
b8ff05a9
DM
1720
1721 /* Setup the free list for atid_tab and clear the stid bitmap. */
1722 if (natids) {
1723 while (--natids)
1724 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1725 t->afree = t->atid_tab;
1726 }
dca4faeb 1727 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
b6f8eaec
KS
1728 /* Reserve stid 0 for T4/T5 adapters */
1729 if (!t->stid_base &&
3ccc6cf7 1730 (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
b6f8eaec
KS
1731 __set_bit(0, t->stid_bmap);
1732
b8ff05a9
DM
1733 return 0;
1734}
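
/* Illustrative layout (commentary, not extra driver code) of the single
 * t4_alloc_mem() block carved up above; each table pointer simply points
 * just past the end of the previous table:
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] | ftid_tab[nftids + nsftids]
 */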

/**
 * cxgb4_create_server - create an IP server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IP address to bind server to
 * @sport: the server's TCP port
 * @vlan: the VLAN header info
 * @queue: queue to direct messages from this server to
 *
 * Create an IP server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
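
/* Usage sketch (illustrative; netdev, stid and rxq_id are assumptions, not
 * driver state): an offload driver that has already allocated a server TID
 * might bind a listener on TCP port 80 across all local addresses with
 *
 *	err = cxgb4_create_server(netdev, stid, htonl(INADDR_ANY),
 *				  htons(80), 0, rxq_id);
 *
 * treating a negative return as failure and NET_XMIT_* as queued to FW.
 */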

/**
 * cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 * @mtus: the HW MTU table
 * @mtu: the target MTU
 * @idx: index of selected entry in the MTU table
 *
 * Returns the index and the value in the HW MTU table that is closest to
 * but does not exceed @mtu, unless @mtu is smaller than any value in the
 * table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
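
/* Worked example (illustrative; the table values are hypothetical): with
 * mtus[] = { 576, 1500, 9000, ... }, a target of 1400 returns 576 (idx 0)
 * because 1500 would exceed it; a target of 1500 returns 1500 (idx 1); and
 * any target below 576 still returns mtus[0], the smallest entry.
 */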

/**
 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 * @mtus: the HW MTU table
 * @header_size: Header Size
 * @data_size_max: maximum Data Segment Size
 * @data_size_align: desired Data Segment Size Alignment (2^N)
 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
 * MTU Table based solely on a Maximum MTU parameter, we break that
 * parameter up into a Header Size and Maximum Data Segment Size, and
 * provide a desired Data Segment Size Alignment.  If we find an MTU in
 * the Hardware MTU Table which will result in a Data Segment Size with
 * the requested alignment _and_ that MTU isn't "too far" from the
 * closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
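
/* Worked example (illustrative; hypothetical adjacent table entries 1320
 * and 1500): with header_size = 40 and data_size_align = 256, 1320 yields a
 * 1280-byte data segment (aligned) while 1500 yields 1460 (not aligned).
 * If the scan stops at 1500, the aligned entry is only one index back, so
 * "not too far" applies and 1320 is returned instead of the closer 1500.
 */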

/**
 * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 * @chip: chip type
 * @viid: VI id of the given port
 *
 * Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
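
/* Example (illustrative): for viid 5, T4/T5 return (5 & 0x7f) << 1 = 10,
 * i.e. the first of the two SMAC entries in row 5, while T6 returns 5
 * directly since each of its 256 rows holds a single entry.
 */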

/**
 * cxgb4_port_chan - get the HW channel of a port
 * @dev: the net device for the port
 *
 * Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 * cxgb4_port_viid - get the VI id of a port
 * @dev: the net device for the port
 *
 * Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 * cxgb4_port_idx - get the index of a port
 * @dev: the net device for the port
 *
 * Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
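
/* Illustrative arithmetic for the wrap case above: the producer index wraps
 * at the queue size, so with size = 1024, hw_pidx = 1000 and pidx = 8 the
 * doorbell must advance by 1024 - 1000 + 8 = 32 descriptors rather than a
 * (negative) direct difference.
 */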

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1.  All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
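
/* Memory map sketch for the translation above (region sizes are read from
 * the MA BAR registers at runtime; this is an illustration, not fixed
 * values):
 *
 *	0 ....... edc0_end ....... edc1_end ....... mc0_end ....... mc1_end
 *	|  EDC0   |     EDC1      |      MC0       |    MC1 (T5 only)    |
 *
 * An offset is resolved by locating its region, then subtracting that
 * region's start to obtain the address within the selected memory type.
 */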

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
					     cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
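
/* Illustrative decode (commentary, not extra driver code): on T5 the
 * dropped-doorbell status word read above packs both recovery fields into
 * one register: bits 31:15 carry the egress queue id and bits 12:0 the
 * lost PIDX increment, which is then replayed through that queue's BAR2
 * doorbell once t4_bar2_sge_qregs() has produced the BAR2 offset.
 */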

void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->pf;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.iscsi_rxq;
		lli.nrxq = adap->sge.iscsiqsets;
	} else if (uld == CXGB4_ULD_ISCSIT) {
		lli.rxq_ids = adap->sge.iscsit_rxq;
		lli.nrxq = adap->sge.niscsitq;
	}
	lli.ntxq = adap->sge.iscsiqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << adap->params.sge.eq_qpp;
	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lli.nodeid = dev_to_node(adap->pdev_dev);

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.  Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
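
/* Usage sketch (illustrative; my_add and my_state_change are hypothetical
 * ULD callbacks, not part of this driver): an upper-layer driver registers
 * once and is immediately offered every adapter already probed:
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_add,
 *		.state_change	= my_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *
 * A -EBUSY return means a ULD of that type is already registered.
 */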

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}

static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}

/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this like the filter being locked, currently
 * pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
		__be32 sip, __be16 sport, __be16 vlan,
		unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
		unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = stats.rx_fcs_err;
	ns->rx_frame_errors = stats.rx_symbol_err;
	ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
			     stats.rx_ovflow2 + stats.rx_ovflow3 +
			     stats.rx_trunc0 + stats.rx_trunc1 +
			     stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		switch (pi->tstamp_config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			pi->rxtstamp = false;
			break;
		case HWTSTAMP_FILTER_ALL:
			pi->rxtstamp = true;
			break;
		default:
			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
			return -ERANGE;
		}

		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)		/* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll        = cxgb_busy_poll,
#endif

};

void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
b8ff05a9
DM
3208/*
3209 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3210 */
3211#define MAX_ATIDS 8192U
3212
636f9d37
VP
3213/*
3214 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3215 *
3216 * If the firmware we're dealing with has Configuration File support, then
3217 * we use that to perform all configuration
3218 */
3219
3220/*
3221 * Tweak configuration based on module parameters, etc. Most of these have
3222 * defaults assigned to them by Firmware Configuration Files (if we're using
3223 * them) but need to be explicitly set if we're using hard-coded
3224 * initialization. But even in the case of using Firmware Configuration
3225 * Files, we'd like to expose the ability to change these via module
3226 * parameters so these are essentially common tweaks/settings for
3227 * Configuration Files and hard-coded initialization ...
3228 */
3229static int adap_init0_tweaks(struct adapter *adapter)
3230{
3231 /*
3232 * Fix up various Host-Dependent Parameters like Page Size, Cache
3233 * Line Size, etc. The firmware default is for a 4KB Page Size and
3234 * 64B Cache Line Size ...
3235 */
3236 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3237
3238 /*
3239 * Process module parameters which affect early initialization.
3240 */
3241 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3242 dev_err(&adapter->pdev->dev,
3243 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3244 rx_dma_offset);
3245 rx_dma_offset = 2;
3246 }
f612b815
HS
3247 t4_set_reg_field(adapter, SGE_CONTROL_A,
3248 PKTSHIFT_V(PKTSHIFT_M),
3249 PKTSHIFT_V(rx_dma_offset));
636f9d37
VP
3250
3251 /*
3252 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3253 * adds the pseudo header itself.
3254 */
837e4a42
HS
3255 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
3256 CSUM_HAS_PSEUDO_HDR_F, 0);
636f9d37
VP
3257
3258 return 0;
3259}
3260
01b69614
HS
3261/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
3262 * unto themselves and they contain their own firmware to perform their
3263 * tasks ...
3264 */
3265static int phy_aq1202_version(const u8 *phy_fw_data,
3266 size_t phy_fw_size)
3267{
3268 int offset;
3269
3270 /* At offset 0x8 you're looking for the primary image's
3271 * starting offset which is 3 Bytes wide
3272 *
3273 * At offset 0xa of the primary image, you look for the offset
3274 * of the DRAM segment which is 3 Bytes wide.
3275 *
3276 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
3277 * wide
3278 */
3279 #define be16(__p) (((__p)[0] << 8) | (__p)[1])
3280 #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
3281 #define le24(__p) (le16(__p) | ((__p)[2] << 16))
3282
3283 offset = le24(phy_fw_data + 0x8) << 12;
3284 offset = le24(phy_fw_data + offset + 0xa);
3285 return be16(phy_fw_data + offset + 0x27e);
3286
3287 #undef be16
3288 #undef le16
3289 #undef le24
3290}
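
/* Worked example of the parse above (the offsets come from the comment;
 * the byte values are hypothetical): if the three little-endian bytes at
 * 0x8 decode to 0x2, the primary image starts at 0x2 << 12 = 0x2000.  The
 * three little-endian bytes at 0x2000 + 0xa then give the DRAM segment
 * offset, and the two big-endian bytes at that offset + 0x27e are the
 * firmware version that gets returned.
 */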

static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
	{ 0, NULL, NULL },
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}

/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that.  The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
3396
636f9d37
VP
3397/*
3398 * Attempt to initialize the adapter via a Firmware Configuration File.
3399 */
3400static int adap_init0_config(struct adapter *adapter, int reset)
3401{
3402 struct fw_caps_config_cmd caps_cmd;
3403 const struct firmware *cf;
3404 unsigned long mtype = 0, maddr = 0;
3405 u32 finiver, finicsum, cfcsum;
16e47624
HS
3406 int ret;
3407 int config_issued = 0;
0a57a536 3408 char *fw_config_file, fw_config_file_path[256];
16e47624 3409 char *config_name = NULL;
636f9d37
VP
3410
3411 /*
3412 * Reset device if necessary.
3413 */
3414 if (reset) {
3415 ret = t4_fw_reset(adapter, adapter->mbox,
0d804338 3416 PIORSTMODE_F | PIORST_F);
636f9d37
VP
3417 if (ret < 0)
3418 goto bye;
3419 }
3420
01b69614
HS
3421 /* If this is a 10Gb/s-BT adapter make sure the chip-external
3422 * 10Gb/s-BT PHYs have up-to-date firmware. Note that this step needs
3423 * to be performed after any global adapter RESET above since some
3424 * PHYs only have local RAM copies of the PHY firmware.
3425 */
3426 if (is_10gbt_device(adapter->pdev->device)) {
3427 ret = adap_init0_phy(adapter);
3428 if (ret < 0)
3429 goto bye;
3430 }
636f9d37
VP
3431 /*
3432 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3433 * then use that. Otherwise, use the configuration file stored
3434 * in the adapter flash ...
3435 */
d14807dd 3436 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
0a57a536 3437 case CHELSIO_T4:
16e47624 3438 fw_config_file = FW4_CFNAME;
0a57a536
SR
3439 break;
3440 case CHELSIO_T5:
3441 fw_config_file = FW5_CFNAME;
3442 break;
3ccc6cf7
HS
3443 case CHELSIO_T6:
3444 fw_config_file = FW6_CFNAME;
3445 break;
0a57a536
SR
3446 default:
3447 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3448 adapter->pdev->device);
3449 ret = -EINVAL;
3450 goto bye;
3451 }
3452
3453 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
636f9d37 3454 if (ret < 0) {
16e47624 3455 config_name = "On FLASH";
636f9d37
VP
3456 mtype = FW_MEMTYPE_CF_FLASH;
3457 maddr = t4_flash_cfg_addr(adapter);
3458 } else {
3459 u32 params[7], val[7];
3460
16e47624
HS
3461 sprintf(fw_config_file_path,
3462 "/lib/firmware/%s", fw_config_file);
3463 config_name = fw_config_file_path;
3464
636f9d37
VP
3465 if (cf->size >= FLASH_CFG_MAX_SIZE)
3466 ret = -ENOMEM;
3467 else {
5167865a
HS
3468 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3469 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
636f9d37 3470 ret = t4_query_params(adapter, adapter->mbox,
b2612722 3471 adapter->pf, 0, 1, params, val);
636f9d37
VP
3472 if (ret == 0) {
3473 /*
fc5ab020 3474 * For t4_memory_rw() below, addresses and
636f9d37
VP
3475 * sizes have to be in terms of multiples of 4
3476 * bytes. So, if the Configuration File isn't
3477 * a multiple of 4 bytes in length we'll have
3478 * to write that out separately since we can't
3479 * guarantee that the bytes following the
3480 * residual byte in the buffer returned by
3481 * request_firmware() are zeroed out ...
3482 */
3483 size_t resid = cf->size & 0x3;
3484 size_t size = cf->size & ~0x3;
3485 __be32 *data = (__be32 *)cf->data;
3486
5167865a
HS
3487 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
3488 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
636f9d37 3489
fc5ab020
HS
3490 spin_lock(&adapter->win0_lock);
3491 ret = t4_memory_rw(adapter, 0, mtype, maddr,
3492 size, data, T4_MEMORY_WRITE);
636f9d37
VP
3493 if (ret == 0 && resid != 0) {
3494 union {
3495 __be32 word;
3496 char buf[4];
3497 } last;
3498 int i;
3499
3500 last.word = data[size >> 2];
3501 for (i = resid; i < 4; i++)
3502 last.buf[i] = 0;
fc5ab020
HS
3503 ret = t4_memory_rw(adapter, 0, mtype,
3504 maddr + size,
3505 4, &last.word,
3506 T4_MEMORY_WRITE);
636f9d37 3507 }
fc5ab020 3508 spin_unlock(&adapter->win0_lock);
636f9d37
VP
3509 }
3510 }
3511
3512 release_firmware(cf);
3513 if (ret)
3514 goto bye;
3515 }
3516
3517 /*
3518 * Issue a Capability Configuration command to the firmware to get it
3519 * to parse the Configuration File. We don't use t4_fw_config_file()
3520 * because we want the ability to modify various features after we've
3521 * processed the configuration file ...
3522 */
3523 memset(&caps_cmd, 0, sizeof(caps_cmd));
3524 caps_cmd.op_to_write =
e2ac9628
HS
3525 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3526 FW_CMD_REQUEST_F |
3527 FW_CMD_READ_F);
ce91a923 3528 caps_cmd.cfvalid_to_len16 =
5167865a
HS
3529 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
3530 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
3531 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
636f9d37
VP
3532 FW_LEN16(caps_cmd));
3533 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3534 &caps_cmd);
16e47624
HS
3535
3536 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
3537 * Configuration File in FLASH), our last-gasp effort is to use the
3538 * Firmware Configuration File which is embedded in the firmware. A
3539 * very few early versions of the firmware didn't have one embedded
3540 * but we can ignore those.
3541 */
3542 if (ret == -ENOENT) {
3543 memset(&caps_cmd, 0, sizeof(caps_cmd));
3544 caps_cmd.op_to_write =
e2ac9628
HS
3545 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3546 FW_CMD_REQUEST_F |
3547 FW_CMD_READ_F);
16e47624
HS
3548 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
3549 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
3550 sizeof(caps_cmd), &caps_cmd);
3551 config_name = "Firmware Default";
3552 }
3553
3554 config_issued = 1;
636f9d37
VP
3555 if (ret < 0)
3556 goto bye;
3557
3558 finiver = ntohl(caps_cmd.finiver);
3559 finicsum = ntohl(caps_cmd.finicsum);
3560 cfcsum = ntohl(caps_cmd.cfcsum);
3561 if (finicsum != cfcsum)
3562 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3563 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3564 finicsum, cfcsum);
3565
636f9d37
VP
3566 /*
3567 * And now tell the firmware to use the configuration we just loaded.
3568 */
3569 caps_cmd.op_to_write =
e2ac9628
HS
3570 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3571 FW_CMD_REQUEST_F |
3572 FW_CMD_WRITE_F);
ce91a923 3573 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
3574 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3575 NULL);
3576 if (ret < 0)
3577 goto bye;
3578
3579 /*
3580 * Tweak configuration based on system architecture, module
3581 * parameters, etc.
3582 */
3583 ret = adap_init0_tweaks(adapter);
3584 if (ret < 0)
3585 goto bye;
3586
3587 /*
3588 * And finally tell the firmware to initialize itself using the
3589 * parameters from the Configuration File.
3590 */
3591 ret = t4_fw_initialize(adapter, adapter->mbox);
3592 if (ret < 0)
3593 goto bye;
3594
06640310
HS
3595 /* Emit Firmware Configuration File information and return
3596 * successfully.
636f9d37 3597 */
636f9d37 3598 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
16e47624
HS
3599 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
3600 config_name, finiver, cfcsum);
636f9d37
VP
3601 return 0;
3602
3603 /*
3604 * Something bad happened. Return the error ... (If the "error"
3605 * is that there's no Configuration File on the adapter we don't
3606 * want to issue a warning since this is fairly common.)
3607 */
3608bye:
16e47624
HS
3609 if (config_issued && ret != -ENOENT)
3610 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
3611 config_name, -ret);
636f9d37
VP
3612 return ret;
3613}
3614
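/* Editorial sketch: the residual-byte handling in adap_init0_config()
 * (round the length down to a 4-byte multiple, then zero-pad the last
 * partial word) in standalone C.  Hypothetical helper, not driver code:
 */
#include <stdint.h>
#include <string.h>

static void copy_padded_words(uint32_t *dst, const uint8_t *src, size_t len)
{
	size_t whole = len & ~(size_t)0x3;	/* bytes in full 32-bit words */
	size_t resid = len & 0x3;		/* leftover tail bytes */

	memcpy(dst, src, whole);
	if (resid) {
		uint32_t last = 0;

		memcpy(&last, src + whole, resid); /* unused bytes stay zero */
		dst[whole >> 2] = last;
	}
}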
16e47624
HS
3615static struct fw_info fw_info_array[] = {
3616 {
3617 .chip = CHELSIO_T4,
3618 .fs_name = FW4_CFNAME,
3619 .fw_mod_name = FW4_FNAME,
3620 .fw_hdr = {
3621 .chip = FW_HDR_CHIP_T4,
3622 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
3623 .intfver_nic = FW_INTFVER(T4, NIC),
3624 .intfver_vnic = FW_INTFVER(T4, VNIC),
3625 .intfver_ri = FW_INTFVER(T4, RI),
3626 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
3627 .intfver_fcoe = FW_INTFVER(T4, FCOE),
3628 },
3629 }, {
3630 .chip = CHELSIO_T5,
3631 .fs_name = FW5_CFNAME,
3632 .fw_mod_name = FW5_FNAME,
3633 .fw_hdr = {
3634 .chip = FW_HDR_CHIP_T5,
3635 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
3636 .intfver_nic = FW_INTFVER(T5, NIC),
3637 .intfver_vnic = FW_INTFVER(T5, VNIC),
3638 .intfver_ri = FW_INTFVER(T5, RI),
3639 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
3640 .intfver_fcoe = FW_INTFVER(T5, FCOE),
3641 },
3ccc6cf7
HS
3642 }, {
3643 .chip = CHELSIO_T6,
3644 .fs_name = FW6_CFNAME,
3645 .fw_mod_name = FW6_FNAME,
3646 .fw_hdr = {
3647 .chip = FW_HDR_CHIP_T6,
3648 .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
3649 .intfver_nic = FW_INTFVER(T6, NIC),
3650 .intfver_vnic = FW_INTFVER(T6, VNIC),
3651 .intfver_ofld = FW_INTFVER(T6, OFLD),
3652 .intfver_ri = FW_INTFVER(T6, RI),
3653 .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
3654 .intfver_iscsi = FW_INTFVER(T6, ISCSI),
3655 .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
3656 .intfver_fcoe = FW_INTFVER(T6, FCOE),
3657 },
16e47624 3658 }
3ccc6cf7 3659
16e47624
HS
3660};
3661
3662static struct fw_info *find_fw_info(int chip)
3663{
3664 int i;
3665
3666 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
3667 if (fw_info_array[i].chip == chip)
3668 return &fw_info_array[i];
3669 }
3670 return NULL;
3671}
3672
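/* Usage sketch (hedged; "adap" stands for a struct adapter *): resolve
 * the compiled-in firmware header for the running chip and treat a miss
 * as an unsupported device, exactly as adap_init0() does below.
 */
#if 0	/* illustrative only */
	struct fw_info *fw_info =
		find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
	if (!fw_info)
		return -EINVAL;
#endif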
b8ff05a9
DM
3673/*
3674 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3675 */
3676static int adap_init0(struct adapter *adap)
3677{
3678 int ret;
3679 u32 v, port_vec;
3680 enum dev_state state;
3681 u32 params[7], val[7];
9a4da2cd 3682 struct fw_caps_config_cmd caps_cmd;
dcf7b6f5 3683 int reset = 1;
b8ff05a9 3684
ae469b68
HS
3685 /* Grab Firmware Device Log parameters as early as possible so we have
3686 * access to it for debugging, etc.
3687 */
3688 ret = t4_init_devlog_params(adap);
3689 if (ret < 0)
3690 return ret;
3691
666224d4
HS
3692 /* Contact FW, advertising Master capability */
3693 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
b8ff05a9
DM
3694 if (ret < 0) {
3695 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3696 ret);
3697 return ret;
3698 }
636f9d37
VP
3699 if (ret == adap->mbox)
3700 adap->flags |= MASTER_PF;
b8ff05a9 3701
636f9d37
VP
3702 /*
3703 * If we're the Master PF Driver and the device is uninitialized,
3704 * then let's consider upgrading the firmware ... (We always want
3705 * to check the firmware version number in order to A. get it for
3706 * later reporting and B. to warn if the currently loaded firmware
3707 * is excessively mismatched relative to the driver.)
3708 */
16e47624
HS
3709 t4_get_fw_version(adap, &adap->params.fw_vers);
3710 t4_get_tp_version(adap, &adap->params.tp_vers);
a69265e9
HS
3711 ret = t4_check_fw_version(adap);
3712 /* If firmware is too old (not supported by driver) force an update. */
21d11bd6 3713 if (ret)
a69265e9 3714 state = DEV_STATE_UNINIT;
636f9d37 3715 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
16e47624
HS
3716 struct fw_info *fw_info;
3717 struct fw_hdr *card_fw;
3718 const struct firmware *fw;
3719 const u8 *fw_data = NULL;
3720 unsigned int fw_size = 0;
3721
3722 /* This is the firmware whose headers the driver was compiled
3723 * against
3724 */
3725 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
3726 if (fw_info == NULL) {
3727 dev_err(adap->pdev_dev,
3728 "unable to get firmware info for chip %d.\n",
3729 CHELSIO_CHIP_VERSION(adap->params.chip));
3730 return -EINVAL;
636f9d37 3731 }
16e47624
HS
3732
3733 /* allocate memory to read the header of the firmware on the
3734 * card
3735 */
3736 card_fw = t4_alloc_mem(sizeof(*card_fw));
3737
3738 /* Get FW from /lib/firmware/ */
3739 ret = request_firmware(&fw, fw_info->fw_mod_name,
3740 adap->pdev_dev);
3741 if (ret < 0) {
3742 dev_err(adap->pdev_dev,
3743 "unable to load firmware image %s, error %d\n",
3744 fw_info->fw_mod_name, ret);
3745 } else {
3746 fw_data = fw->data;
3747 fw_size = fw->size;
3748 }
3749
3750 /* upgrade FW logic */
3751 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
3752 state, &reset);
3753
3754 /* Cleaning up */
0b5b6bee 3755 release_firmware(fw);
16e47624
HS
3756 t4_free_mem(card_fw);
3757
636f9d37 3758 if (ret < 0)
16e47624 3759 goto bye;
636f9d37 3760 }
b8ff05a9 3761
636f9d37
VP
3762 /*
3763 * Grab VPD parameters. This should be done after we establish a
3764 * connection to the firmware since some of the VPD parameters
3765 * (notably the Core Clock frequency) are retrieved via requests to
3766 * the firmware. On the other hand, we need these fairly early on
3767 * so we do this right after getting ahold of the firmware.
3768 */
098ef6c2 3769 ret = t4_get_vpd_params(adap, &adap->params.vpd);
a0881cab
DM
3770 if (ret < 0)
3771 goto bye;
a0881cab 3772
636f9d37 3773 /*
13ee15d3
VP
3774 * Find out what ports are available to us. Note that we need to do
3775 * this before calling adap_init0_no_config() since it needs nports
3776 * and portvec ...
636f9d37
VP
3777 */
3778 v =
5167865a
HS
3779 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3780 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
b2612722 3781 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
a0881cab
DM
3782 if (ret < 0)
3783 goto bye;
3784
636f9d37
VP
3785 adap->params.nports = hweight32(port_vec);
3786 adap->params.portvec = port_vec;
3787
06640310
HS
3788 /* If the firmware is initialized already, emit a simply note to that
3789 * effect. Otherwise, it's time to try initializing the adapter.
636f9d37
VP
3790 */
3791 if (state == DEV_STATE_INIT) {
3792 dev_info(adap->pdev_dev, "Coming up as %s: "\
3793 "Adapter already initialized\n",
3794 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
636f9d37
VP
3795 } else {
3796 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3797 "Initializing adapter\n");
06640310
HS
3798
3799 /* Find out whether we're dealing with a version of the
3800 * firmware which has configuration file support.
636f9d37 3801 */
06640310
HS
3802 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3803 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
b2612722 3804 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
06640310 3805 params, val);
13ee15d3 3806
06640310
HS
3807 /* If the firmware doesn't support Configuration Files,
3808 * return an error.
3809 */
3810 if (ret < 0) {
3811 dev_err(adap->pdev_dev, "firmware doesn't support "
3812 "Firmware Configuration Files\n");
3813 goto bye;
3814 }
3815
3816 /* The firmware provides us with a memory buffer where we can
3817 * load a Configuration File from the host if we want to
3818 * override the Configuration File in flash.
3819 */
3820 ret = adap_init0_config(adap, reset);
3821 if (ret == -ENOENT) {
3822 dev_err(adap->pdev_dev, "no Configuration File "
3823 "present on adapter.\n");
3824 goto bye;
636f9d37
VP
3825 }
3826 if (ret < 0) {
06640310
HS
3827 dev_err(adap->pdev_dev, "could not initialize "
3828 "adapter, error %d\n", -ret);
636f9d37
VP
3829 goto bye;
3830 }
3831 }
3832
06640310
HS
3833 /* Give the SGE code a chance to pull in anything that it needs ...
3834 * Note that this must be called after we retrieve our VPD parameters
3835 * in order to know how to convert core ticks to seconds, etc.
636f9d37 3836 */
06640310
HS
3837 ret = t4_sge_init(adap);
3838 if (ret < 0)
3839 goto bye;
636f9d37 3840
9a4da2cd
VP
3841 if (is_bypass_device(adap->pdev->device))
3842 adap->params.bypass = 1;
3843
636f9d37
VP
3844 /*
3845 * Grab some of our fundamental operating parameters.
3846 */
3847#define FW_PARAM_DEV(param) \
5167865a
HS
3848 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
3849 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
636f9d37 3850
b8ff05a9 3851#define FW_PARAM_PFVF(param) \
5167865a
HS
3852 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
3853 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
3854 FW_PARAMS_PARAM_Y_V(0) | \
3855 FW_PARAMS_PARAM_Z_V(0)
b8ff05a9 3856
636f9d37 3857 params[0] = FW_PARAM_PFVF(EQ_START);
b8ff05a9
DM
3858 params[1] = FW_PARAM_PFVF(L2T_START);
3859 params[2] = FW_PARAM_PFVF(L2T_END);
3860 params[3] = FW_PARAM_PFVF(FILTER_START);
3861 params[4] = FW_PARAM_PFVF(FILTER_END);
e46dab4d 3862 params[5] = FW_PARAM_PFVF(IQFLINT_START);
b2612722 3863 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
b8ff05a9
DM
3864 if (ret < 0)
3865 goto bye;
636f9d37
VP
3866 adap->sge.egr_start = val[0];
3867 adap->l2t_start = val[1];
3868 adap->l2t_end = val[2];
b8ff05a9
DM
3869 adap->tids.ftid_base = val[3];
3870 adap->tids.nftids = val[4] - val[3] + 1;
e46dab4d 3871 adap->sge.ingr_start = val[5];
b8ff05a9 3872
4b8e27a8
HS
3873 /* qids (ingress/egress) returned from firmware can be anywhere
3874 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
3875 * Hence the driver needs to allocate memory for this range to
3876 * store the queue info. Get the highest IQFLINT/EQ index returned
3877 * in FW_EQ_*_CMD.alloc command.
3878 */
3879 params[0] = FW_PARAM_PFVF(EQ_END);
3880 params[1] = FW_PARAM_PFVF(IQFLINT_END);
b2612722 3881 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
4b8e27a8
HS
3882 if (ret < 0)
3883 goto bye;
3884 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
3885 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
3886
3887 adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
3888 sizeof(*adap->sge.egr_map), GFP_KERNEL);
3889 if (!adap->sge.egr_map) {
3890 ret = -ENOMEM;
3891 goto bye;
3892 }
3893
3894 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
3895 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
3896 if (!adap->sge.ingr_map) {
3897 ret = -ENOMEM;
3898 goto bye;
3899 }
3900
3901 /* Allocate the memory for the various egress queue bitmaps,
5b377d11 3902 * i.e. starving_fl, txq_maperr and blocked_fl.
4b8e27a8
HS
3903 */
3904 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3905 sizeof(long), GFP_KERNEL);
3906 if (!adap->sge.starving_fl) {
3907 ret = -ENOMEM;
3908 goto bye;
3909 }
3910
3911 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3912 sizeof(long), GFP_KERNEL);
3913 if (!adap->sge.txq_maperr) {
3914 ret = -ENOMEM;
3915 goto bye;
3916 }
3917
5b377d11
HS
3918#ifdef CONFIG_DEBUG_FS
3919 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
3920 sizeof(long), GFP_KERNEL);
3921 if (!adap->sge.blocked_fl) {
3922 ret = -ENOMEM;
3923 goto bye;
3924 }
3925#endif
3926
b5a02f50
AB
3927 params[0] = FW_PARAM_PFVF(CLIP_START);
3928 params[1] = FW_PARAM_PFVF(CLIP_END);
b2612722 3929 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
b5a02f50
AB
3930 if (ret < 0)
3931 goto bye;
3932 adap->clipt_start = val[0];
3933 adap->clipt_end = val[1];
3934
636f9d37
VP
3935 /* query params related to active filter region */
3936 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
3937 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
b2612722 3938 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
636f9d37
VP
3939 /* If the active filter region is non-empty, we enable establishing
3940 * offload connections through firmware work requests.
3941 */
3942 if ((val[0] != val[1]) && (ret >= 0)) {
3943 adap->flags |= FW_OFLD_CONN;
3944 adap->tids.aftid_base = val[0];
3945 adap->tids.aftid_end = val[1];
3946 }
3947
b407a4a9
VP
3948 /* If we're running on newer firmware, let it know that we're
3949 * prepared to deal with encapsulated CPL messages. Older
3950 * firmware won't understand this and we'll just get
3951 * unencapsulated messages ...
3952 */
3953 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
3954 val[0] = 1;
b2612722 3955 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
b407a4a9 3956
1ac0f095
KS
3957 /*
3958 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
3959 * capability. Earlier versions of the firmware didn't have the
3960 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
3961 * permission to use ULPTX MEMWRITE DSGL.
3962 */
3963 if (is_t4(adap->params.chip)) {
3964 adap->params.ulptx_memwrite_dsgl = false;
3965 } else {
3966 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
b2612722 3967 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1ac0f095
KS
3968 1, params, val);
3969 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
3970 }
3971
636f9d37
VP
3972 /*
3973 * Get device capabilities so we can determine what resources we need
3974 * to manage.
3975 */
3976 memset(&caps_cmd, 0, sizeof(caps_cmd));
e2ac9628
HS
3977 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
3978 FW_CMD_REQUEST_F | FW_CMD_READ_F);
ce91a923 3979 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
636f9d37
VP
3980 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
3981 &caps_cmd);
3982 if (ret < 0)
3983 goto bye;
3984
13ee15d3 3985 if (caps_cmd.ofldcaps) {
b8ff05a9
DM
3986 /* query offload-related parameters */
3987 params[0] = FW_PARAM_DEV(NTID);
3988 params[1] = FW_PARAM_PFVF(SERVER_START);
3989 params[2] = FW_PARAM_PFVF(SERVER_END);
3990 params[3] = FW_PARAM_PFVF(TDDP_START);
3991 params[4] = FW_PARAM_PFVF(TDDP_END);
3992 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
b2612722 3993 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
636f9d37 3994 params, val);
b8ff05a9
DM
3995 if (ret < 0)
3996 goto bye;
3997 adap->tids.ntids = val[0];
3998 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3999 adap->tids.stid_base = val[1];
4000 adap->tids.nstids = val[2] - val[1] + 1;
636f9d37 4001 /*
dbedd44e 4002 * Setup server filter region. Divide the available filter
636f9d37
VP
4003 * region into two parts. Regular filters get 1/3rd and server
4004 * filters get 2/3rd. This is only enabled if the workaround
4005 * path is enabled.
4006 * 1. For regular filters.
4007 * 2. Server filters: these are special filters used to
4008 * redirect SYN packets to the offload queue.
4009 */
4010 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4011 adap->tids.sftid_base = adap->tids.ftid_base +
4012 DIV_ROUND_UP(adap->tids.nftids, 3);
4013 adap->tids.nsftids = adap->tids.nftids -
4014 DIV_ROUND_UP(adap->tids.nftids, 3);
4015 adap->tids.nftids = adap->tids.sftid_base -
4016 adap->tids.ftid_base;
4017 }
b8ff05a9
DM
4018 adap->vres.ddp.start = val[3];
4019 adap->vres.ddp.size = val[4] - val[3] + 1;
4020 adap->params.ofldq_wr_cred = val[5];
636f9d37 4021
b8ff05a9
DM
4022 adap->params.offload = 1;
4023 }
636f9d37 4024 if (caps_cmd.rdmacaps) {
b8ff05a9
DM
4025 params[0] = FW_PARAM_PFVF(STAG_START);
4026 params[1] = FW_PARAM_PFVF(STAG_END);
4027 params[2] = FW_PARAM_PFVF(RQ_START);
4028 params[3] = FW_PARAM_PFVF(RQ_END);
4029 params[4] = FW_PARAM_PFVF(PBL_START);
4030 params[5] = FW_PARAM_PFVF(PBL_END);
b2612722 4031 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
636f9d37 4032 params, val);
b8ff05a9
DM
4033 if (ret < 0)
4034 goto bye;
4035 adap->vres.stag.start = val[0];
4036 adap->vres.stag.size = val[1] - val[0] + 1;
4037 adap->vres.rq.start = val[2];
4038 adap->vres.rq.size = val[3] - val[2] + 1;
4039 adap->vres.pbl.start = val[4];
4040 adap->vres.pbl.size = val[5] - val[4] + 1;
a0881cab
DM
4041
4042 params[0] = FW_PARAM_PFVF(SQRQ_START);
4043 params[1] = FW_PARAM_PFVF(SQRQ_END);
4044 params[2] = FW_PARAM_PFVF(CQ_START);
4045 params[3] = FW_PARAM_PFVF(CQ_END);
1ae970e0
DM
4046 params[4] = FW_PARAM_PFVF(OCQ_START);
4047 params[5] = FW_PARAM_PFVF(OCQ_END);
b2612722 4048 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5c937dd3 4049 val);
a0881cab
DM
4050 if (ret < 0)
4051 goto bye;
4052 adap->vres.qp.start = val[0];
4053 adap->vres.qp.size = val[1] - val[0] + 1;
4054 adap->vres.cq.start = val[2];
4055 adap->vres.cq.size = val[3] - val[2] + 1;
1ae970e0
DM
4056 adap->vres.ocq.start = val[4];
4057 adap->vres.ocq.size = val[5] - val[4] + 1;
4c2c5763
HS
4058
4059 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
4060 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
b2612722 4061 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5c937dd3 4062 val);
4c2c5763
HS
4063 if (ret < 0) {
4064 adap->params.max_ordird_qp = 8;
4065 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
4066 ret = 0;
4067 } else {
4068 adap->params.max_ordird_qp = val[0];
4069 adap->params.max_ird_adapter = val[1];
4070 }
4071 dev_info(adap->pdev_dev,
4072 "max_ordird_qp %d max_ird_adapter %d\n",
4073 adap->params.max_ordird_qp,
4074 adap->params.max_ird_adapter);
b8ff05a9 4075 }
636f9d37 4076 if (caps_cmd.iscsicaps) {
b8ff05a9
DM
4077 params[0] = FW_PARAM_PFVF(ISCSI_START);
4078 params[1] = FW_PARAM_PFVF(ISCSI_END);
b2612722 4079 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
636f9d37 4080 params, val);
b8ff05a9
DM
4081 if (ret < 0)
4082 goto bye;
4083 adap->vres.iscsi.start = val[0];
4084 adap->vres.iscsi.size = val[1] - val[0] + 1;
4085 }
4086#undef FW_PARAM_PFVF
4087#undef FW_PARAM_DEV
4088
92e7ae71
HS
4089 /* The MTU/MSS Table is initialized by now, so load their values. If
4090 * we're initializing the adapter, then we'll make any modifications
4091 * we want to the MTU/MSS Table and also initialize the congestion
4092 * parameters.
636f9d37 4093 */
b8ff05a9 4094 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
92e7ae71
HS
4095 if (state != DEV_STATE_INIT) {
4096 int i;
4097
4098 /* The default MTU Table contains values 1492 and 1500.
4099 * However, for TCP, it's better to have two values which are
4100 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
4101 * This allows us to have a TCP Data Payload which is a
4102 * multiple of 8 regardless of what combination of TCP Options
4103 * are in use (always a multiple of 4 bytes) which is
4104 * important for performance reasons. For instance, if no
4105 * options are in use, then we have a 20-byte IP header and a
4106 * 20-byte TCP header. In this case, a 1500-byte MSS would
4107 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
4108 * which is not a multiple of 8. So using an MSS of 1488 in
4109 * this case results in a TCP Data Payload of 1448 bytes which
4110 * is a multiple of 8. On the other hand, if 12-byte TCP Time
4111 * Stamps have been negotiated, then an MTU of 1500 bytes
4112 * results in a TCP Data Payload of 1448 bytes which, as
4113 * above, is a multiple of 8 bytes ...
4114 */
4115 for (i = 0; i < NMTUS; i++)
4116 if (adap->params.mtus[i] == 1492) {
4117 adap->params.mtus[i] = 1488;
4118 break;
4119 }
7ee9ff94 4120
92e7ae71
HS
4121 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4122 adap->params.b_wnd);
4123 }
df64e4d3 4124 t4_init_sge_params(adap);
636f9d37 4125 adap->flags |= FW_OK;
c1e9af0c 4126 t4_init_tp_params(adap);
b8ff05a9
DM
4127 return 0;
4128
4129 /*
636f9d37
VP
4130 * Something bad happened. If a command timed out or failed with EIO,
4131 * the FW is not operating within its spec or something catastrophic
4132 * happened to the HW/FW; stop issuing commands.
b8ff05a9 4133 */
636f9d37 4134bye:
4b8e27a8
HS
4135 kfree(adap->sge.egr_map);
4136 kfree(adap->sge.ingr_map);
4137 kfree(adap->sge.starving_fl);
4138 kfree(adap->sge.txq_maperr);
5b377d11
HS
4139#ifdef CONFIG_DEBUG_FS
4140 kfree(adap->sge.blocked_fl);
4141#endif
636f9d37
VP
4142 if (ret != -ETIMEDOUT && ret != -EIO)
4143 t4_fw_bye(adap, adap->mbox);
b8ff05a9
DM
4144 return ret;
4145}
4146
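/* Editorial note on the recurring idiom in adap_init0(): firmware
 * reports resources as inclusive [START, END] pairs, so every size is
 * computed as end - start + 1.  Hypothetical helper for clarity:
 */
static inline unsigned int fw_range_size(u32 start, u32 end)
{
	return end - start + 1;		/* END is inclusive */
}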
204dc3c0
DM
4147/* EEH callbacks */
4148
4149static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4150 pci_channel_state_t state)
4151{
4152 int i;
4153 struct adapter *adap = pci_get_drvdata(pdev);
4154
4155 if (!adap)
4156 goto out;
4157
4158 rtnl_lock();
4159 adap->flags &= ~FW_OK;
4160 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
9fe6cb58 4161 spin_lock(&adap->stats_lock);
204dc3c0
DM
4162 for_each_port(adap, i) {
4163 struct net_device *dev = adap->port[i];
4164
4165 netif_device_detach(dev);
4166 netif_carrier_off(dev);
4167 }
9fe6cb58 4168 spin_unlock(&adap->stats_lock);
b37987e8 4169 disable_interrupts(adap);
204dc3c0
DM
4170 if (adap->flags & FULL_INIT_DONE)
4171 cxgb_down(adap);
4172 rtnl_unlock();
144be3d9
GS
4173 if ((adap->flags & DEV_ENABLED)) {
4174 pci_disable_device(pdev);
4175 adap->flags &= ~DEV_ENABLED;
4176 }
204dc3c0
DM
4177out: return state == pci_channel_io_perm_failure ?
4178 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4179}
4180
4181static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4182{
4183 int i, ret;
4184 struct fw_caps_config_cmd c;
4185 struct adapter *adap = pci_get_drvdata(pdev);
4186
4187 if (!adap) {
4188 pci_restore_state(pdev);
4189 pci_save_state(pdev);
4190 return PCI_ERS_RESULT_RECOVERED;
4191 }
4192
144be3d9
GS
4193 if (!(adap->flags & DEV_ENABLED)) {
4194 if (pci_enable_device(pdev)) {
4195 dev_err(&pdev->dev, "Cannot reenable PCI "
4196 "device after reset\n");
4197 return PCI_ERS_RESULT_DISCONNECT;
4198 }
4199 adap->flags |= DEV_ENABLED;
204dc3c0
DM
4200 }
4201
4202 pci_set_master(pdev);
4203 pci_restore_state(pdev);
4204 pci_save_state(pdev);
4205 pci_cleanup_aer_uncorrect_error_status(pdev);
4206
8203b509 4207 if (t4_wait_dev_ready(adap->regs) < 0)
204dc3c0 4208 return PCI_ERS_RESULT_DISCONNECT;
b2612722 4209 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
204dc3c0
DM
4210 return PCI_ERS_RESULT_DISCONNECT;
4211 adap->flags |= FW_OK;
4212 if (adap_init1(adap, &c))
4213 return PCI_ERS_RESULT_DISCONNECT;
4214
4215 for_each_port(adap, i) {
4216 struct port_info *p = adap2pinfo(adap, i);
4217
b2612722 4218 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
060e0c75 4219 NULL, NULL);
204dc3c0
DM
4220 if (ret < 0)
4221 return PCI_ERS_RESULT_DISCONNECT;
4222 p->viid = ret;
4223 p->xact_addr_filt = -1;
4224 }
4225
4226 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4227 adap->params.b_wnd);
1ae970e0 4228 setup_memwin(adap);
204dc3c0
DM
4229 if (cxgb_up(adap))
4230 return PCI_ERS_RESULT_DISCONNECT;
4231 return PCI_ERS_RESULT_RECOVERED;
4232}
4233
4234static void eeh_resume(struct pci_dev *pdev)
4235{
4236 int i;
4237 struct adapter *adap = pci_get_drvdata(pdev);
4238
4239 if (!adap)
4240 return;
4241
4242 rtnl_lock();
4243 for_each_port(adap, i) {
4244 struct net_device *dev = adap->port[i];
4245
4246 if (netif_running(dev)) {
4247 link_start(dev);
4248 cxgb_set_rxmode(dev);
4249 }
4250 netif_device_attach(dev);
4251 }
4252 rtnl_unlock();
4253}
4254
3646f0e5 4255static const struct pci_error_handlers cxgb4_eeh = {
204dc3c0
DM
4256 .error_detected = eeh_err_detected,
4257 .slot_reset = eeh_slot_reset,
4258 .resume = eeh_resume,
4259};
4260
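/* Recovery flow recap: the PCI core invokes .error_detected to quiesce
 * the driver when an error is reported, .slot_reset after the link has
 * been reset so minimal device state can be rebuilt, and .resume to
 * restart traffic -- the three handlers above follow that contract.
 */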
57d8b764 4261static inline bool is_x_10g_port(const struct link_config *lc)
b8ff05a9 4262{
57d8b764
KS
4263 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
4264 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
b8ff05a9
DM
4265}
4266
c887ad0e
HS
4267static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
4268 unsigned int us, unsigned int cnt,
b8ff05a9
DM
4269 unsigned int size, unsigned int iqe_size)
4270{
c887ad0e 4271 q->adap = adap;
812034f1 4272 cxgb4_set_rspq_intr_params(q, us, cnt);
b8ff05a9
DM
4273 q->iqe_len = iqe_size;
4274 q->size = size;
4275}
4276
4277/*
4278 * Perform default configuration of DMA queues depending on the number and type
4279 * of ports we found and the number of available CPUs. Most settings can be
4280 * modified by the admin prior to actual use.
4281 */
91744948 4282static void cfg_queues(struct adapter *adap)
b8ff05a9
DM
4283{
4284 struct sge *s = &adap->sge;
688848b1
AB
4285 int i, n10g = 0, qidx = 0;
4286#ifndef CONFIG_CHELSIO_T4_DCB
4287 int q10g = 0;
4288#endif
cf38be6d 4289 int ciq_size;
b8ff05a9
DM
4290
4291 for_each_port(adap, i)
57d8b764 4292 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
688848b1
AB
4293#ifdef CONFIG_CHELSIO_T4_DCB
4294 /* For Data Center Bridging support we need to be able to support up
4295 * to 8 Traffic Priorities; each of which will be assigned to its
4296 * own TX Queue in order to prevent Head-Of-Line Blocking.
4297 */
4298 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
4299 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
4300 MAX_ETH_QSETS, adap->params.nports * 8);
4301 BUG_ON(1);
4302 }
b8ff05a9 4303
688848b1
AB
4304 for_each_port(adap, i) {
4305 struct port_info *pi = adap2pinfo(adap, i);
4306
4307 pi->first_qset = qidx;
4308 pi->nqsets = 8;
4309 qidx += pi->nqsets;
4310 }
4311#else /* !CONFIG_CHELSIO_T4_DCB */
b8ff05a9
DM
4312 /*
4313 * We default to 1 queue per non-10G port and up to as many queues
4314 * as there are CPU cores per 10G port.
4315 */
4316 if (n10g)
4317 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5952dde7
YM
4318 if (q10g > netif_get_num_default_rss_queues())
4319 q10g = netif_get_num_default_rss_queues();
b8ff05a9
DM
4320
4321 for_each_port(adap, i) {
4322 struct port_info *pi = adap2pinfo(adap, i);
4323
4324 pi->first_qset = qidx;
57d8b764 4325 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
b8ff05a9
DM
4326 qidx += pi->nqsets;
4327 }
688848b1 4328#endif /* !CONFIG_CHELSIO_T4_DCB */
b8ff05a9
DM
4329
4330 s->ethqsets = qidx;
4331 s->max_ethqsets = qidx; /* MSI-X may lower it later */
4332
4333 if (is_offload(adap)) {
4334 /*
4335 * For offload we use 1 queue/channel if all ports are up to 1G,
4336 * otherwise we divide all available queues amongst the channels
4337 * capped by the number of available cores.
4338 */
4339 if (n10g) {
f90ce561 4340 i = min_t(int, ARRAY_SIZE(s->iscsirxq),
b8ff05a9 4341 num_online_cpus());
f90ce561 4342 s->iscsiqsets = roundup(i, adap->params.nports);
b8ff05a9 4343 } else
f90ce561 4344 s->iscsiqsets = adap->params.nports;
b8ff05a9
DM
4345 /* For RDMA one Rx queue per channel suffices */
4346 s->rdmaqs = adap->params.nports;
f36e58e5
HS
4347 /* Try and allow at least 1 CIQ per cpu rounding down
4348 * to the number of ports, with a minimum of 1 per port.
4349 * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
4350 * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
4351 * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
4352 */
4353 s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
4354 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
4355 adap->params.nports;
4356 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
f2692d16
VP
4357
4358 if (!is_t4(adap->params.chip))
4359 s->niscsitq = s->iscsiqsets;
b8ff05a9
DM
4360 }
4361
4362 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4363 struct sge_eth_rxq *r = &s->ethrxq[i];
4364
c887ad0e 4365 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
b8ff05a9
DM
4366 r->fl.size = 72;
4367 }
4368
4369 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4370 s->ethtxq[i].q.size = 1024;
4371
4372 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4373 s->ctrlq[i].q.size = 512;
4374
4375 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4376 s->ofldtxq[i].q.size = 1024;
4377
f90ce561
HS
4378 for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
4379 struct sge_ofld_rxq *r = &s->iscsirxq[i];
b8ff05a9 4380
c887ad0e 4381 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
b8ff05a9
DM
4382 r->rspq.uld = CXGB4_ULD_ISCSI;
4383 r->fl.size = 72;
4384 }
4385
f2692d16
VP
4386 if (!is_t4(adap->params.chip)) {
4387 for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
4388 struct sge_ofld_rxq *r = &s->iscsitrxq[i];
4389
4390 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
4391 r->rspq.uld = CXGB4_ULD_ISCSIT;
4392 r->fl.size = 72;
4393 }
4394 }
4395
b8ff05a9
DM
4396 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4397 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4398
c887ad0e 4399 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
b8ff05a9
DM
4400 r->rspq.uld = CXGB4_ULD_RDMA;
4401 r->fl.size = 72;
4402 }
4403
cf38be6d
HS
4404 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
4405 if (ciq_size > SGE_MAX_IQ_SIZE) {
4406 CH_WARN(adap, "CIQ size too small for available IQs\n");
4407 ciq_size = SGE_MAX_IQ_SIZE;
4408 }
4409
4410 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
4411 struct sge_ofld_rxq *r = &s->rdmaciq[i];
4412
c887ad0e 4413 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
cf38be6d
HS
4414 r->rspq.uld = CXGB4_ULD_RDMA;
4415 }
4416
c887ad0e
HS
4417 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
4418 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
b8ff05a9
DM
4419}
4420
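/* Editorial sketch of the non-DCB queue budget above: non-10G ports
 * get one queue set each, 10G ports split the remainder, capped by the
 * default RSS queue count.  Standalone rendering (hypothetical names):
 */
static int qsets_per_10g_port(int max_qsets, int nports, int n10g,
			      int rss_cap)
{
	int q10g = 0;

	if (n10g)
		q10g = (max_qsets - (nports - n10g)) / n10g;
	if (q10g > rss_cap)
		q10g = rss_cap;
	return q10g;
}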
4421/*
4422 * Reduce the number of Ethernet queues across all ports to at most n.
4423 * n provides at least one queue per port.
4424 */
91744948 4425static void reduce_ethqs(struct adapter *adap, int n)
b8ff05a9
DM
4426{
4427 int i;
4428 struct port_info *pi;
4429
4430 while (n < adap->sge.ethqsets)
4431 for_each_port(adap, i) {
4432 pi = adap2pinfo(adap, i);
4433 if (pi->nqsets > 1) {
4434 pi->nqsets--;
4435 adap->sge.ethqsets--;
4436 if (adap->sge.ethqsets <= n)
4437 break;
4438 }
4439 }
4440
4441 n = 0;
4442 for_each_port(adap, i) {
4443 pi = adap2pinfo(adap, i);
4444 pi->first_qset = n;
4445 n += pi->nqsets;
4446 }
4447}
4448
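/* Worked example: 4 ports with 8 queue sets each (32 total) reduced to
 * n == 10 proceeds round-robin -- 32, 28, 24, 20, 16, 12, then two more
 * single decrements -- before first_qset offsets are re-packed.
 */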
4449/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4450#define EXTRA_VECS 2
4451
91744948 4452static int enable_msix(struct adapter *adap)
b8ff05a9
DM
4453{
4454 int ofld_need = 0;
f36e58e5 4455 int i, want, need, allocated;
b8ff05a9
DM
4456 struct sge *s = &adap->sge;
4457 unsigned int nchan = adap->params.nports;
f36e58e5
HS
4458 struct msix_entry *entries;
4459
4460 entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
4461 GFP_KERNEL);
4462 if (!entries)
4463 return -ENOMEM;
b8ff05a9 4464
f36e58e5 4465 for (i = 0; i < MAX_INGQ + 1; ++i)
b8ff05a9
DM
4466 entries[i].entry = i;
4467
4468 want = s->max_ethqsets + EXTRA_VECS;
4469 if (is_offload(adap)) {
f2692d16
VP
4470 want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
4471 s->niscsitq;
b8ff05a9 4472 /* need nchan for each possible ULD */
f2692d16
VP
4473 if (is_t4(adap->params.chip))
4474 ofld_need = 3 * nchan;
4475 else
4476 ofld_need = 4 * nchan;
b8ff05a9 4477 }
688848b1
AB
4478#ifdef CONFIG_CHELSIO_T4_DCB
4479 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
4480 * each port.
4481 */
4482 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
4483#else
b8ff05a9 4484 need = adap->params.nports + EXTRA_VECS + ofld_need;
688848b1 4485#endif
f36e58e5
HS
4486 allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
4487 if (allocated < 0) {
4488 dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
4489 " not using MSI-X\n");
4490 kfree(entries);
4491 return allocated;
4492 }
b8ff05a9 4493
f36e58e5 4494 /* Distribute available vectors to the various queue groups.
c32ad224
AG
4495 * Every group gets its minimum requirement and NIC gets top
4496 * priority for leftovers.
4497 */
f36e58e5 4498 i = allocated - EXTRA_VECS - ofld_need;
c32ad224
AG
4499 if (i < s->max_ethqsets) {
4500 s->max_ethqsets = i;
4501 if (i < s->ethqsets)
4502 reduce_ethqs(adap, i);
4503 }
4504 if (is_offload(adap)) {
f36e58e5
HS
4505 if (allocated < want) {
4506 s->rdmaqs = nchan;
4507 s->rdmaciqs = nchan;
f2692d16
VP
4508
4509 if (!is_t4(adap->params.chip))
4510 s->niscsitq = nchan;
f36e58e5
HS
4511 }
4512
4513 /* leftovers go to OFLD */
4514 i = allocated - EXTRA_VECS - s->max_ethqsets -
f2692d16 4515 s->rdmaqs - s->rdmaciqs - s->niscsitq;
f90ce561 4516 s->iscsiqsets = (i / nchan) * nchan; /* round down */
f2692d16 4517
c32ad224 4518 }
f36e58e5 4519 for (i = 0; i < allocated; ++i)
c32ad224 4520 adap->msix_info[i].vec = entries[i].vector;
43eb4e82
HS
4521 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
4522 "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
f90ce561 4523 allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
43eb4e82 4524 s->rdmaciqs);
c32ad224 4525
f36e58e5 4526 kfree(entries);
c32ad224 4527 return 0;
b8ff05a9
DM
4528}
4529
4530#undef EXTRA_VECS
4531
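/* Sizing recap for enable_msix(): "want" is the ideal vector count
 * (all Ethernet queue sets plus EXTRA_VECS plus the offload queues),
 * "need" is the floor (one per port plus EXTRA_VECS plus ofld_need);
 * pci_enable_msix_range() may return anything in [need, want], and the
 * NIC queue groups absorb leftovers before the offload queue counts
 * are rounded down to a multiple of the channel count.
 */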
91744948 4532static int init_rss(struct adapter *adap)
671b0060 4533{
c035e183
HS
4534 unsigned int i;
4535 int err;
4536
4537 err = t4_init_rss_mode(adap, adap->mbox);
4538 if (err)
4539 return err;
671b0060
DM
4540
4541 for_each_port(adap, i) {
4542 struct port_info *pi = adap2pinfo(adap, i);
4543
4544 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4545 if (!pi->rss)
4546 return -ENOMEM;
671b0060
DM
4547 }
4548 return 0;
4549}
4550
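/* Note: the per-port pi->rss tables allocated here are released in
 * free_some_resources(); a failure partway through leaves the earlier
 * ports' tables in place for that same cleanup path.
 */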
547fd272
HS
4551static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
4552 enum pci_bus_speed *speed,
4553 enum pcie_link_width *width)
4554{
4555 u32 lnkcap1, lnkcap2;
4556 int err1, err2;
4557
4558#define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
4559
4560 *speed = PCI_SPEED_UNKNOWN;
4561 *width = PCIE_LNK_WIDTH_UNKNOWN;
4562
4563 err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
4564 &lnkcap1);
4565 err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
4566 &lnkcap2);
4567 if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
4568 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
4569 *speed = PCIE_SPEED_8_0GT;
4570 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
4571 *speed = PCIE_SPEED_5_0GT;
4572 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
4573 *speed = PCIE_SPEED_2_5GT;
4574 }
4575 if (!err1) {
4576 *width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
4577 if (!lnkcap2) { /* pre-r3.0 */
4578 if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
4579 *speed = PCIE_SPEED_5_0GT;
4580 else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
4581 *speed = PCIE_SPEED_2_5GT;
4582 }
4583 }
4584
4585 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
4586 return err1 ? err1 : err2 ? err2 : -EINVAL;
4587 return 0;
4588}
4589
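/* Editorial sketch: the width decode above extracts the Maximum Link
 * Width field (bits 9:4 of Link Capabilities).  Standalone form with
 * the mask spelled out (PCI_EXP_LNKCAP_MLW == 0x03f0):
 */
static unsigned int lnkcap_max_width(u32 lnkcap1)
{
	return (lnkcap1 & 0x3f0) >> 4;	/* lanes: x1, x2, x4, ... x16 */
}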
4590static void cxgb4_check_pcie_caps(struct adapter *adap)
4591{
4592 enum pcie_link_width width, width_cap;
4593 enum pci_bus_speed speed, speed_cap;
4594
4595#define PCIE_SPEED_STR(speed) \
4596 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
4597 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
4598 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
4599 "Unknown")
4600
4601 if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
4602 dev_warn(adap->pdev_dev,
4603 "Unable to determine PCIe device BW capabilities\n");
4604 return;
4605 }
4606
4607 if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
4608 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
4609 dev_warn(adap->pdev_dev,
4610 "Unable to determine PCI Express bandwidth.\n");
4611 return;
4612 }
4613
4614 dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
4615 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
4616 dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
4617 width, width_cap);
4618 if (speed < speed_cap || width < width_cap)
4619 dev_info(adap->pdev_dev,
4620 "A slot with more lanes and/or higher speed is "
4621 "suggested for optimal performance.\n");
4622}
4623
91744948 4624static void print_port_info(const struct net_device *dev)
b8ff05a9 4625{
b8ff05a9 4626 char buf[80];
118969ed 4627 char *bufp = buf;
f1a051b9 4628 const char *spd = "";
118969ed
DM
4629 const struct port_info *pi = netdev_priv(dev);
4630 const struct adapter *adap = pi->adapter;
f1a051b9
DM
4631
4632 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
4633 spd = " 2.5 GT/s";
4634 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
4635 spd = " 5 GT/s";
d2e752db
RD
4636 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
4637 spd = " 8 GT/s";
b8ff05a9 4638
118969ed
DM
4639 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4640 bufp += sprintf(bufp, "100/");
4641 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4642 bufp += sprintf(bufp, "1000/");
4643 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4644 bufp += sprintf(bufp, "10G/");
72aca4bf
KS
4645 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
4646 bufp += sprintf(bufp, "40G/");
118969ed
DM
4647 if (bufp != buf)
4648 --bufp;
72aca4bf 4649 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
118969ed 4650
547fd272 4651 netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
0a57a536 4652 adap->params.vpd.id,
d14807dd 4653 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
547fd272 4654 is_offload(adap) ? "R" : "",
118969ed
DM
4655 (adap->flags & USING_MSIX) ? " MSI-X" :
4656 (adap->flags & USING_MSI) ? " MSI" : "");
a94cd705
KS
4657 netdev_info(dev, "S/N: %s, P/N: %s\n",
4658 adap->params.vpd.sn, adap->params.vpd.pn);
b8ff05a9
DM
4659}
4660
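/* String-building note: each bufp += sprintf(...) above appends one
 * supported speed with a trailing '/', and the final --bufp backs onto
 * that slash so the "BASE-%s" suffix overwrites it (e.g. "100/1000/10G/"
 * collapses into a single "...10GBASE-..." string).
 */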
91744948 4661static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
ef306b50 4662{
e5c8ae5f 4663 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
ef306b50
DM
4664}
4665
06546391
DM
4666/*
4667 * Free the following resources:
4668 * - memory used for tables
4669 * - MSI/MSI-X
4670 * - net devices
4671 * - resources FW is holding for us
4672 */
4673static void free_some_resources(struct adapter *adapter)
4674{
4675 unsigned int i;
4676
4677 t4_free_mem(adapter->l2t);
4678 t4_free_mem(adapter->tids.tid_tab);
4b8e27a8
HS
4679 kfree(adapter->sge.egr_map);
4680 kfree(adapter->sge.ingr_map);
4681 kfree(adapter->sge.starving_fl);
4682 kfree(adapter->sge.txq_maperr);
5b377d11
HS
4683#ifdef CONFIG_DEBUG_FS
4684 kfree(adapter->sge.blocked_fl);
4685#endif
06546391
DM
4686 disable_msi(adapter);
4687
4688 for_each_port(adapter, i)
671b0060 4689 if (adapter->port[i]) {
4f3a0fcf
HS
4690 struct port_info *pi = adap2pinfo(adapter, i);
4691
4692 if (pi->viid != 0)
4693 t4_free_vi(adapter, adapter->mbox, adapter->pf,
4694 0, pi->viid);
671b0060 4695 kfree(adap2pinfo(adapter, i)->rss);
06546391 4696 free_netdev(adapter->port[i]);
671b0060 4697 }
06546391 4698 if (adapter->flags & FW_OK)
b2612722 4699 t4_fw_bye(adapter, adapter->pf);
06546391
DM
4700}
4701
2ed28baa 4702#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
35d35682 4703#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
b8ff05a9 4704 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
22adfe0a 4705#define SEGMENT_SIZE 128
b8ff05a9 4706
d86bd29e
HS
4707static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
4708{
d86bd29e
HS
4709 u16 device_id;
4710
4711 /* Retrieve adapter's device ID */
4712 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
46cdc9be 4713
4714 switch (device_id >> 12) {
d86bd29e 4715 case CHELSIO_T4:
46cdc9be 4716 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
d86bd29e 4717 case CHELSIO_T5:
46cdc9be 4718 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
d86bd29e 4719 case CHELSIO_T6:
46cdc9be 4720 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
d86bd29e
HS
4721 default:
4722 dev_err(&pdev->dev, "Device %d is not supported\n",
4723 device_id);
d86bd29e 4724 }
46cdc9be 4725 return -EINVAL;
d86bd29e
HS
4726}
4727
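/* Decoding note: Chelsio device IDs carry the chip family in the top
 * nibble (0x4xxx == T4, 0x5xxx == T5, 0x6xxx == T6), hence the switch
 * on device_id >> 12 above.
 */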
1dd06ae8 4728static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
b8ff05a9 4729{
22adfe0a 4730 int func, i, err, s_qpp, qpp, num_seg;
b8ff05a9 4731 struct port_info *pi;
c8f44aff 4732 bool highdma = false;
b8ff05a9 4733 struct adapter *adapter = NULL;
d6ce2628 4734 void __iomem *regs;
d86bd29e
HS
4735 u32 whoami, pl_rev;
4736 enum chip_type chip;
b8ff05a9
DM
4737
4738 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
4739
4740 err = pci_request_regions(pdev, KBUILD_MODNAME);
4741 if (err) {
4742 /* Just info, some other driver may have claimed the device. */
4743 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
4744 return err;
4745 }
4746
b8ff05a9
DM
4747 err = pci_enable_device(pdev);
4748 if (err) {
4749 dev_err(&pdev->dev, "cannot enable PCI device\n");
4750 goto out_release_regions;
4751 }
4752
d6ce2628
HS
4753 regs = pci_ioremap_bar(pdev, 0);
4754 if (!regs) {
4755 dev_err(&pdev->dev, "cannot map device registers\n");
4756 err = -ENOMEM;
4757 goto out_disable_device;
4758 }
4759
8203b509
HS
4760 err = t4_wait_dev_ready(regs);
4761 if (err < 0)
4762 goto out_unmap_bar0;
4763
d6ce2628 4764 /* We control everything through one PF */
d86bd29e
HS
4765 whoami = readl(regs + PL_WHOAMI_A);
4766 pl_rev = REV_G(readl(regs + PL_REV_A));
4767 chip = get_chip_type(pdev, pl_rev);
4768 func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
4769 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
d6ce2628
HS
4770 if (func != ent->driver_data) {
4771 iounmap(regs);
4772 pci_disable_device(pdev);
4773 pci_save_state(pdev); /* to restore SR-IOV later */
4774 goto sriov;
4775 }
4776
b8ff05a9 4777 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
c8f44aff 4778 highdma = true;
b8ff05a9
DM
4779 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4780 if (err) {
4781 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
4782 "coherent allocations\n");
d6ce2628 4783 goto out_unmap_bar0;
b8ff05a9
DM
4784 }
4785 } else {
4786 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4787 if (err) {
4788 dev_err(&pdev->dev, "no usable DMA configuration\n");
d6ce2628 4789 goto out_unmap_bar0;
b8ff05a9
DM
4790 }
4791 }
4792
4793 pci_enable_pcie_error_reporting(pdev);
ef306b50 4794 enable_pcie_relaxed_ordering(pdev);
b8ff05a9
DM
4795 pci_set_master(pdev);
4796 pci_save_state(pdev);
4797
4798 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4799 if (!adapter) {
4800 err = -ENOMEM;
d6ce2628 4801 goto out_unmap_bar0;
b8ff05a9
DM
4802 }
4803
29aaee65
AB
4804 adapter->workq = create_singlethread_workqueue("cxgb4");
4805 if (!adapter->workq) {
4806 err = -ENOMEM;
4807 goto out_free_adapter;
4808 }
4809
144be3d9
GS
4810 /* PCI device has been enabled */
4811 adapter->flags |= DEV_ENABLED;
4812
d6ce2628 4813 adapter->regs = regs;
b8ff05a9
DM
4814 adapter->pdev = pdev;
4815 adapter->pdev_dev = &pdev->dev;
3069ee9b 4816 adapter->mbox = func;
b2612722 4817 adapter->pf = func;
b8ff05a9
DM
4818 adapter->msg_enable = dflt_msg_enable;
4819 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4820
4821 spin_lock_init(&adapter->stats_lock);
4822 spin_lock_init(&adapter->tid_release_lock);
e327c225 4823 spin_lock_init(&adapter->win0_lock);
b8ff05a9
DM
4824
4825 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
881806bc
VP
4826 INIT_WORK(&adapter->db_full_task, process_db_full);
4827 INIT_WORK(&adapter->db_drop_task, process_db_drop);
b8ff05a9
DM
4828
4829 err = t4_prep_adapter(adapter);
4830 if (err)
d6ce2628
HS
4831 goto out_free_adapter;
4832
22adfe0a 4833
d14807dd 4834 if (!is_t4(adapter->params.chip)) {
f612b815
HS
4835 s_qpp = (QUEUESPERPAGEPF0_S +
4836 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
b2612722 4837 adapter->pf);
f612b815
HS
4838 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
4839 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
22adfe0a
SR
4840 num_seg = PAGE_SIZE / SEGMENT_SIZE;
4841
4842 /* Each segment is 128B in size. Write coalescing is enabled only
4843 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
4844 * queue is less than the number of segments that can be
4845 * accommodated in a page.
4846 */
4847 if (qpp > num_seg) {
4848 dev_err(&pdev->dev,
4849 "Incorrect number of egress queues per page\n");
4850 err = -EINVAL;
d6ce2628 4851 goto out_free_adapter;
22adfe0a
SR
4852 }
4853 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
4854 pci_resource_len(pdev, 2));
4855 if (!adapter->bar2) {
4856 dev_err(&pdev->dev, "cannot map device bar2 region\n");
4857 err = -ENOMEM;
d6ce2628 4858 goto out_free_adapter;
22adfe0a
SR
4859 }
4860 }
4861
636f9d37 4862 setup_memwin(adapter);
b8ff05a9 4863 err = adap_init0(adapter);
5b377d11
HS
4864#ifdef CONFIG_DEBUG_FS
4865 bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
4866#endif
636f9d37 4867 setup_memwin_rdma(adapter);
b8ff05a9
DM
4868 if (err)
4869 goto out_unmap_bar;
4870
2a485cf7
HS
4871 /* configure SGE_STAT_CFG_A to read WC stats */
4872 if (!is_t4(adapter->params.chip))
676d6a75
HS
4873 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
4874 (is_t5(adapter->params.chip) ? STATMODE_V(0) :
4875 T6_STATMODE_V(0)));
2a485cf7 4876
b8ff05a9
DM
4877 for_each_port(adapter, i) {
4878 struct net_device *netdev;
4879
4880 netdev = alloc_etherdev_mq(sizeof(struct port_info),
4881 MAX_ETH_QSETS);
4882 if (!netdev) {
4883 err = -ENOMEM;
4884 goto out_free_dev;
4885 }
4886
4887 SET_NETDEV_DEV(netdev, &pdev->dev);
4888
4889 adapter->port[i] = netdev;
4890 pi = netdev_priv(netdev);
4891 pi->adapter = adapter;
4892 pi->xact_addr_filt = -1;
b8ff05a9 4893 pi->port_id = i;
b8ff05a9
DM
4894 netdev->irq = pdev->irq;
4895
2ed28baa
MM
4896 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
4897 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4898 NETIF_F_RXCSUM | NETIF_F_RXHASH |
f646968f 4899 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
c8f44aff
MM
4900 if (highdma)
4901 netdev->hw_features |= NETIF_F_HIGHDMA;
4902 netdev->features |= netdev->hw_features;
b8ff05a9
DM
4903 netdev->vlan_features = netdev->features & VLAN_FEAT;
4904
01789349
JP
4905 netdev->priv_flags |= IFF_UNICAST_FLT;
4906
b8ff05a9 4907 netdev->netdev_ops = &cxgb4_netdev_ops;
688848b1
AB
4908#ifdef CONFIG_CHELSIO_T4_DCB
4909 netdev->dcbnl_ops = &cxgb4_dcb_ops;
4910 cxgb4_dcb_state_init(netdev);
4911#endif
812034f1 4912 cxgb4_set_ethtool_ops(netdev);
b8ff05a9
DM
4913 }
4914
4915 pci_set_drvdata(pdev, adapter);
4916
4917 if (adapter->flags & FW_OK) {
060e0c75 4918 err = t4_port_init(adapter, func, func, 0);
b8ff05a9
DM
4919 if (err)
4920 goto out_free_dev;
098ef6c2
HS
4921 } else if (adapter->params.nports == 1) {
4922 /* If we don't have a connection to the firmware -- possibly
4923 * because of an error -- grab the raw VPD parameters so we
4924 * can set the proper MAC Address on the debug network
4925 * interface that we've created.
4926 */
4927 u8 hw_addr[ETH_ALEN];
4928 u8 *na = adapter->params.vpd.na;
4929
4930 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
4931 if (!err) {
4932 for (i = 0; i < ETH_ALEN; i++)
4933 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
4934 hex2val(na[2 * i + 1]));
4935 t4_set_hw_addr(adapter, 0, hw_addr);
4936 }
b8ff05a9
DM
4937 }
4938
098ef6c2 4939 /* Configure queues and allocate tables now; they can be needed as
b8ff05a9
DM
4940 * soon as the first register_netdev completes.
4941 */
4942 cfg_queues(adapter);
4943
5be9ed8d 4944 adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
b8ff05a9
DM
4945 if (!adapter->l2t) {
4946 /* We tolerate a lack of L2T, giving up some functionality */
4947 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
4948 adapter->params.offload = 0;
4949 }
4950
b5a02f50 4951#if IS_ENABLED(CONFIG_IPV6)
eb72f74f
HS
4952 if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
4953 (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
4954 /* CLIP functionality is not present in hardware,
4955 * hence disable all offload features
b5a02f50
AB
4956 */
4957 dev_warn(&pdev->dev,
eb72f74f 4958 "CLIP not enabled in hardware, continuing\n");
b5a02f50 4959 adapter->params.offload = 0;
eb72f74f
HS
4960 } else {
4961 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
4962 adapter->clipt_end);
4963 if (!adapter->clipt) {
4964 /* We tolerate a lack of clip_table, giving up
4965 * some functionality
4966 */
4967 dev_warn(&pdev->dev,
4968 "could not allocate Clip table, continuing\n");
4969 adapter->params.offload = 0;
4970 }
b5a02f50
AB
4971 }
4972#endif
b8ff05a9
DM
4973 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
4974 dev_warn(&pdev->dev, "could not allocate TID table, "
4975 "continuing\n");
4976 adapter->params.offload = 0;
4977 }
4978
9a1bb9f6
HS
4979 if (is_offload(adapter)) {
4980 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
4981 u32 hash_base, hash_reg;
4982
4983 if (chip <= CHELSIO_T5) {
4984 hash_reg = LE_DB_TID_HASHBASE_A;
4985 hash_base = t4_read_reg(adapter, hash_reg);
4986 adapter->tids.hash_base = hash_base / 4;
4987 } else {
4988 hash_reg = T6_LE_DB_HASH_TID_BASE_A;
4989 hash_base = t4_read_reg(adapter, hash_reg);
4990 adapter->tids.hash_base = hash_base;
4991 }
4992 }
4993 }
4994
f7cabcdd
DM
4995 /* See what interrupts we'll be using */
4996 if (msi > 1 && enable_msix(adapter) == 0)
4997 adapter->flags |= USING_MSIX;
4998 else if (msi > 0 && pci_enable_msi(pdev) == 0)
4999 adapter->flags |= USING_MSI;
5000
547fd272
HS
5001 /* check for PCI Express bandwidth capabilities */
5002 cxgb4_check_pcie_caps(adapter);
5003
671b0060
DM
5004 err = init_rss(adapter);
5005 if (err)
5006 goto out_free_dev;
5007
b8ff05a9
DM
5008 /*
5009 * The card is now ready to go. If any errors occur during device
5010 * registration we do not fail the whole card but rather proceed only
5011 * with the ports we manage to register successfully. However we must
5012 * register at least one net device.
5013 */
5014 for_each_port(adapter, i) {
a57cabe0
DM
5015 pi = adap2pinfo(adapter, i);
5016 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5017 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5018
b8ff05a9
DM
5019 err = register_netdev(adapter->port[i]);
5020 if (err)
b1a3c2b6 5021 break;
b1a3c2b6
DM
5022 adapter->chan_map[pi->tx_chan] = i;
5023 print_port_info(adapter->port[i]);
b8ff05a9 5024 }
b1a3c2b6 5025 if (i == 0) {
b8ff05a9
DM
5026 dev_err(&pdev->dev, "could not register any net devices\n");
5027 goto out_free_dev;
5028 }
b1a3c2b6
DM
5029 if (err) {
5030 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5031 err = 0;
6403eab1 5032 }
b8ff05a9
DM
5033
5034 if (cxgb4_debugfs_root) {
5035 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5036 cxgb4_debugfs_root);
5037 setup_debugfs(adapter);
5038 }
5039
6482aa7c
DLR
5040 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5041 pdev->needs_freset = 1;
5042
b8ff05a9
DM
5043 if (is_offload(adapter))
5044 attach_ulds(adapter);
5045
8e1e6059 5046sriov:
b8ff05a9 5047#ifdef CONFIG_PCI_IOV
7d6727cf 5048 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
b8ff05a9
DM
5049 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
5050 dev_info(&pdev->dev,
5051 "instantiated %u virtual functions\n",
5052 num_vf[func]);
5053#endif
5054 return 0;
5055
5056 out_free_dev:
06546391 5057 free_some_resources(adapter);
b8ff05a9 5058 out_unmap_bar:
d14807dd 5059 if (!is_t4(adapter->params.chip))
22adfe0a 5060 iounmap(adapter->bar2);
b8ff05a9 5061 out_free_adapter:
29aaee65
AB
5062 if (adapter->workq)
5063 destroy_workqueue(adapter->workq);
5064
b8ff05a9 5065 kfree(adapter);
d6ce2628
HS
5066 out_unmap_bar0:
5067 iounmap(regs);
b8ff05a9
DM
5068 out_disable_device:
5069 pci_disable_pcie_error_reporting(pdev);
5070 pci_disable_device(pdev);
5071 out_release_regions:
5072 pci_release_regions(pdev);
b8ff05a9
DM
5073 return err;
5074}
5075
91744948 5076static void remove_one(struct pci_dev *pdev)
b8ff05a9
DM
5077{
5078 struct adapter *adapter = pci_get_drvdata(pdev);
5079
636f9d37 5080#ifdef CONFIG_PCI_IOV
b8ff05a9
DM
5081 pci_disable_sriov(pdev);
5082
636f9d37
VP
5083#endif
5084
b8ff05a9
DM
5085 if (adapter) {
5086 int i;
5087
29aaee65
AB
5088 /* Tear down per-adapter Work Queue first since it can contain
5089 * references to our adapter data structure.
5090 */
5091 destroy_workqueue(adapter->workq);
5092
b8ff05a9
DM
5093 if (is_offload(adapter))
5094 detach_ulds(adapter);
5095
b37987e8
HS
5096 disable_interrupts(adapter);
5097
b8ff05a9 5098 for_each_port(adapter, i)
8f3a7676 5099 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
b8ff05a9
DM
5100 unregister_netdev(adapter->port[i]);
5101
9f16dc2e 5102 debugfs_remove_recursive(adapter->debugfs_root);
b8ff05a9 5103
f2b7e78d
VP
5104 /* If we allocated filters, free up state associated with any
5105 * valid filters ...
5106 */
5107 if (adapter->tids.ftid_tab) {
5108 struct filter_entry *f = &adapter->tids.ftid_tab[0];
dca4faeb
VP
5109 for (i = 0; i < (adapter->tids.nftids +
5110 adapter->tids.nsftids); i++, f++)
f2b7e78d
VP
5111 if (f->valid)
5112 clear_filter(adapter, f);
5113 }
5114
aaefae9b
DM
5115 if (adapter->flags & FULL_INIT_DONE)
5116 cxgb_down(adapter);
b8ff05a9 5117
06546391 5118 free_some_resources(adapter);
b5a02f50
AB
5119#if IS_ENABLED(CONFIG_IPV6)
5120 t4_cleanup_clip_tbl(adapter);
5121#endif
b8ff05a9 5122 iounmap(adapter->regs);
d14807dd 5123 if (!is_t4(adapter->params.chip))
22adfe0a 5124 iounmap(adapter->bar2);
b8ff05a9 5125 pci_disable_pcie_error_reporting(pdev);
144be3d9
GS
5126 if ((adapter->flags & DEV_ENABLED)) {
5127 pci_disable_device(pdev);
5128 adapter->flags &= ~DEV_ENABLED;
5129 }
b8ff05a9 5130 pci_release_regions(pdev);
ee9a33b2 5131 synchronize_rcu();
8b662fe7 5132 kfree(adapter);
a069ec91 5133 } else
b8ff05a9
DM
5134 pci_release_regions(pdev);
5135}
5136
5137static struct pci_driver cxgb4_driver = {
5138 .name = KBUILD_MODNAME,
5139 .id_table = cxgb4_pci_tbl,
5140 .probe = init_one,
91744948 5141 .remove = remove_one,
687d705c 5142 .shutdown = remove_one,
204dc3c0 5143 .err_handler = &cxgb4_eeh,
b8ff05a9
DM
5144};
5145
5146static int __init cxgb4_init_module(void)
5147{
5148 int ret;
5149
5150 /* Debugfs support is optional, just warn if this fails */
5151 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
5152 if (!cxgb4_debugfs_root)
428ac43f 5153 pr_warn("could not create debugfs entry, continuing\n");
b8ff05a9
DM
5154
5155 ret = pci_register_driver(&cxgb4_driver);
29aaee65 5156 if (ret < 0)
b8ff05a9 5157 debugfs_remove(cxgb4_debugfs_root);
01bcca68 5158
1bb60376 5159#if IS_ENABLED(CONFIG_IPV6)
b5a02f50
AB
5160 if (!inet6addr_registered) {
5161 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
5162 inet6addr_registered = true;
5163 }
1bb60376 5164#endif
01bcca68 5165
b8ff05a9
DM
5166 return ret;
5167}
5168
5169static void __exit cxgb4_cleanup_module(void)
5170{
1bb60376 5171#if IS_ENABLED(CONFIG_IPV6)
1793c798 5172 if (inet6addr_registered) {
b5a02f50
AB
5173 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
5174 inet6addr_registered = false;
5175 }
1bb60376 5176#endif
b8ff05a9
DM
5177 pci_unregister_driver(&cxgb4_driver);
5178 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
5179}
5180
5181module_init(cxgb4_init_module);
5182module_exit(cxgb4_cleanup_module);