cxgb4: Added support in debugfs to dump sge_qinfo
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1 /*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <net/bonding.h>
66 #include <asm/uaccess.h>
67
68 #include "cxgb4.h"
69 #include "t4_regs.h"
70 #include "t4_values.h"
71 #include "t4_msg.h"
72 #include "t4fw_api.h"
73 #include "cxgb4_dcb.h"
74 #include "cxgb4_debugfs.h"
75 #include "clip_tbl.h"
76 #include "l2t.h"
77
78 #ifdef DRV_VERSION
79 #undef DRV_VERSION
80 #endif
81 #define DRV_VERSION "2.0.0-ko"
82 #define DRV_DESC "Chelsio T4/T5 Network Driver"
83
84 enum {
85 MAX_TXQ_ENTRIES = 16384,
86 MAX_CTRL_TXQ_ENTRIES = 1024,
87 MAX_RSPQ_ENTRIES = 16384,
88 MAX_RX_BUFFERS = 16384,
89 MIN_TXQ_ENTRIES = 32,
90 MIN_CTRL_TXQ_ENTRIES = 32,
91 MIN_RSPQ_ENTRIES = 128,
92 MIN_FL_ENTRIES = 16
93 };
94
95 /* Host shadow copy of ingress filter entry. This is in host native format
96 * and doesn't match the ordering or bit order, etc. of the hardware or the
97 * firmware command. The use of bit-field structure elements is purely to
98 * remind ourselves of the field size limitations and save memory in the case
99 * where the filter table is large.
100 */
101 struct filter_entry {
102 /* Administrative fields for filter.
103 */
104 u32 valid:1; /* filter allocated and valid */
105 u32 locked:1; /* filter is administratively locked */
106
107 u32 pending:1; /* filter action is pending firmware reply */
108 u32 smtidx:8; /* Source MAC Table index for smac */
109 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
110
111 /* The filter itself. Most of this is a straight copy of information
112 * provided by the extended ioctl(). Some fields are translated to
113 * internal forms -- for instance the Ingress Queue ID passed in from
114 * the ioctl() is translated into the Absolute Ingress Queue ID.
115 */
116 struct ch_filter_specification fs;
117 };
118
119 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
120 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
121 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
122
123 /* Macros needed to support the PCI Device ID Table ...
124 */
125 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
126 static struct pci_device_id cxgb4_pci_tbl[] = {
127 #define CH_PCI_DEVICE_ID_FUNCTION 0x4
128
129 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
130 * called for both.
131 */
132 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
133
134 #define CH_PCI_ID_TABLE_ENTRY(devid) \
135 {PCI_VDEVICE(CHELSIO, (devid)), 4}
136
137 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
138 { 0, } \
139 }
140
141 #include "t4_pci_id_tbl.h"
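/* Illustrative expansion (editorial note, not part of the driver): given the
 * macros above, a hypothetical t4_pci_id_tbl.h containing a single
 * CH_PCI_ID_TABLE_ENTRY would expand to roughly the following table; 0x4401
 * is only a placeholder device ID.
 *
 *	static struct pci_device_id cxgb4_pci_tbl[] = {
 *		{PCI_VDEVICE(CHELSIO, (0x4401)), 4},
 *		{ 0, }
 *	};
 */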
142
143 #define FW4_FNAME "cxgb4/t4fw.bin"
144 #define FW5_FNAME "cxgb4/t5fw.bin"
145 #define FW4_CFNAME "cxgb4/t4-config.txt"
146 #define FW5_CFNAME "cxgb4/t5-config.txt"
147
148 MODULE_DESCRIPTION(DRV_DESC);
149 MODULE_AUTHOR("Chelsio Communications");
150 MODULE_LICENSE("Dual BSD/GPL");
151 MODULE_VERSION(DRV_VERSION);
152 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
153 MODULE_FIRMWARE(FW4_FNAME);
154 MODULE_FIRMWARE(FW5_FNAME);
155
156 /*
157 * Normally we're willing to become the firmware's Master PF but will be happy
158 * if another PF has already become the Master and initialized the adapter.
159 * Setting "force_init" will cause this driver to forcibly establish itself as
160 * the Master PF and initialize the adapter.
161 */
162 static uint force_init;
163
164 module_param(force_init, uint, 0644);
165 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
166
167 /*
168 * Normally if the firmware we connect to has Configuration File support, we
169 * use that and only fall back to the old Driver-based initialization if the
170 * Configuration File fails for some reason. If force_old_init is set, then
171 * we'll always use the old Driver-based initialization sequence.
172 */
173 static uint force_old_init;
174
175 module_param(force_old_init, uint, 0644);
176 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
177 " parameter");
178
179 static int dflt_msg_enable = DFLT_MSG_ENABLE;
180
181 module_param(dflt_msg_enable, int, 0644);
182 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
183
184 /*
185 * The driver uses the best interrupt scheme available on a platform in the
186 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
187 * of these schemes the driver may consider as follows:
188 *
189 * msi = 2: choose from among all three options
190 * msi = 1: only consider MSI and INTx interrupts
191 * msi = 0: force INTx interrupts
192 */
193 static int msi = 2;
194
195 module_param(msi, int, 0644);
196 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
197
198 /*
199 * Queue interrupt hold-off timer values. Queues default to the first of these
200 * upon creation.
201 */
202 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
203
204 module_param_array(intr_holdoff, uint, NULL, 0644);
205 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
206 "0..4 in microseconds, deprecated parameter");
207
208 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
209
210 module_param_array(intr_cnt, uint, NULL, 0644);
211 MODULE_PARM_DESC(intr_cnt,
212 "thresholds 1..3 for queue interrupt packet counters, "
213 "deprecated parameter");
214
215 /*
216 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
217 * offset by 2 bytes in order to have the IP headers line up on 4-byte
218 * boundaries. This is a requirement for many architectures which will throw
219 * a machine check fault if an attempt is made to access one of the 4-byte IP
220 * header fields on a non-4-byte boundary. And it's a major performance issue
221 * even on some architectures which allow it like some implementations of the
222 * x86 ISA. However, some architectures don't mind this and for some very
223 * edge-case performance sensitive applications (like forwarding large volumes
224 * of small packets), setting this DMA offset to 0 will decrease the number of
225 * PCI-E Bus transfers enough to measurably affect performance.
226 */
227 static int rx_dma_offset = 2;
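/* Worked example (editorial note): with the default offset of 2, a received
 * frame is DMA'ed starting at byte 2 of the buffer, so the 14-byte Ethernet
 * header occupies bytes 2..15 and the IP header starts at byte 16 -- a
 * 4-byte aligned address -- rather than at byte 14.
 */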
228
229 static bool vf_acls;
230
231 #ifdef CONFIG_PCI_IOV
232 module_param(vf_acls, bool, 0644);
233 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
234 "deprecated parameter");
235
236 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
237 * on SR-IOV Capable Physical Functions.
238 */
239 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
240
241 module_param_array(num_vf, uint, NULL, 0644);
242 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
243 #endif
244
245 /* TX Queue select used to determine what algorithm to use for selecting TX
246 * queue. Select between the kernel provided function (select_queue=0) or user
247 * cxgb_select_queue function (select_queue=1)
248 *
249 * Default: select_queue=0
250 */
251 static int select_queue;
252 module_param(select_queue, int, 0644);
253 MODULE_PARM_DESC(select_queue,
254 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
255
256 static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
257
258 module_param(tp_vlan_pri_map, uint, 0644);
259 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
260 "deprecated parameter");
261
262 static struct dentry *cxgb4_debugfs_root;
263
264 static LIST_HEAD(adapter_list);
265 static DEFINE_MUTEX(uld_mutex);
266 /* Adapter list to be accessed from atomic context */
267 static LIST_HEAD(adap_rcu_list);
268 static DEFINE_SPINLOCK(adap_rcu_lock);
269 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
270 static const char *uld_str[] = { "RDMA", "iSCSI" };
271
272 static void link_report(struct net_device *dev)
273 {
274 if (!netif_carrier_ok(dev))
275 netdev_info(dev, "link down\n");
276 else {
277 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
278
279 const char *s = "10Mbps";
280 const struct port_info *p = netdev_priv(dev);
281
282 switch (p->link_cfg.speed) {
283 case 10000:
284 s = "10Gbps";
285 break;
286 case 1000:
287 s = "1000Mbps";
288 break;
289 case 100:
290 s = "100Mbps";
291 break;
292 case 40000:
293 s = "40Gbps";
294 break;
295 }
296
297 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
298 fc[p->link_cfg.fc]);
299 }
300 }
301
302 #ifdef CONFIG_CHELSIO_T4_DCB
303 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
304 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
305 {
306 struct port_info *pi = netdev_priv(dev);
307 struct adapter *adap = pi->adapter;
308 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
309 int i;
310
311 /* We use a simple mapping of Port TX Queue Index to DCB
312 * Priority when we're enabling DCB.
313 */
314 for (i = 0; i < pi->nqsets; i++, txq++) {
315 u32 name, value;
316 int err;
317
318 name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
319 FW_PARAMS_PARAM_X_V(
320 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
321 FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
322 value = enable ? i : 0xffffffff;
323
324 /* Since we can be called while atomic (from "interrupt
325 * level") we need to issue the Set Parameters Commannd
326 * without sleeping (timeout < 0).
327 */
328 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
329 &name, &value);
330
331 if (err)
332 dev_err(adap->pdev_dev,
333 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
334 enable ? "set" : "unset", pi->port_id, i, -err);
335 else
336 txq->dcb_prio = value;
337 }
338 }
339 #endif /* CONFIG_CHELSIO_T4_DCB */
340
341 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
342 {
343 struct net_device *dev = adapter->port[port_id];
344
345 /* Skip changes from disabled ports. */
346 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
347 if (link_stat)
348 netif_carrier_on(dev);
349 else {
350 #ifdef CONFIG_CHELSIO_T4_DCB
351 cxgb4_dcb_state_init(dev);
352 dcb_tx_queue_prio_enable(dev, false);
353 #endif /* CONFIG_CHELSIO_T4_DCB */
354 netif_carrier_off(dev);
355 }
356
357 link_report(dev);
358 }
359 }
360
361 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
362 {
363 static const char *mod_str[] = {
364 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
365 };
366
367 const struct net_device *dev = adap->port[port_id];
368 const struct port_info *pi = netdev_priv(dev);
369
370 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
371 netdev_info(dev, "port module unplugged\n");
372 else if (pi->mod_type < ARRAY_SIZE(mod_str))
373 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
374 }
375
376 /*
377 * Configure the exact and hash address filters to handle a port's multicast
378 * and secondary unicast MAC addresses.
379 */
380 static int set_addr_filters(const struct net_device *dev, bool sleep)
381 {
382 u64 mhash = 0;
383 u64 uhash = 0;
384 bool free = true;
385 u16 filt_idx[7];
386 const u8 *addr[7];
387 int ret, naddr = 0;
388 const struct netdev_hw_addr *ha;
389 int uc_cnt = netdev_uc_count(dev);
390 int mc_cnt = netdev_mc_count(dev);
391 const struct port_info *pi = netdev_priv(dev);
392 unsigned int mb = pi->adapter->fn;
393
394 /* first do the secondary unicast addresses */
395 netdev_for_each_uc_addr(ha, dev) {
396 addr[naddr++] = ha->addr;
397 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
398 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
399 naddr, addr, filt_idx, &uhash, sleep);
400 if (ret < 0)
401 return ret;
402
403 free = false;
404 naddr = 0;
405 }
406 }
407
408 /* next set up the multicast addresses */
409 netdev_for_each_mc_addr(ha, dev) {
410 addr[naddr++] = ha->addr;
411 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
412 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
413 naddr, addr, filt_idx, &mhash, sleep);
414 if (ret < 0)
415 return ret;
416
417 free = false;
418 naddr = 0;
419 }
420 }
421
422 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
423 uhash | mhash, sleep);
424 }
425
426 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
427 module_param(dbfifo_int_thresh, int, 0644);
428 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
429
430 /*
431 * usecs to sleep while draining the dbfifo
432 */
433 static int dbfifo_drain_delay = 1000;
434 module_param(dbfifo_drain_delay, int, 0644);
435 MODULE_PARM_DESC(dbfifo_drain_delay,
436 "usecs to sleep while draining the dbfifo");
437
438 /*
439 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
440 * If @mtu is -1 it is left unchanged.
441 */
442 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
443 {
444 int ret;
445 struct port_info *pi = netdev_priv(dev);
446
447 ret = set_addr_filters(dev, sleep_ok);
448 if (ret == 0)
449 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
450 (dev->flags & IFF_PROMISC) ? 1 : 0,
451 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
452 sleep_ok);
453 return ret;
454 }
455
456 /**
457 * link_start - enable a port
458 * @dev: the port to enable
459 *
460 * Performs the MAC and PHY actions needed to enable a port.
461 */
462 static int link_start(struct net_device *dev)
463 {
464 int ret;
465 struct port_info *pi = netdev_priv(dev);
466 unsigned int mb = pi->adapter->fn;
467
468 /*
469 * We do not set address filters and promiscuity here, the stack does
470 * that step explicitly.
471 */
472 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
473 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
474 if (ret == 0) {
475 ret = t4_change_mac(pi->adapter, mb, pi->viid,
476 pi->xact_addr_filt, dev->dev_addr, true,
477 true);
478 if (ret >= 0) {
479 pi->xact_addr_filt = ret;
480 ret = 0;
481 }
482 }
483 if (ret == 0)
484 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
485 &pi->link_cfg);
486 if (ret == 0) {
487 local_bh_disable();
488 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
489 true, CXGB4_DCB_ENABLED);
490 local_bh_enable();
491 }
492
493 return ret;
494 }
495
496 int cxgb4_dcb_enabled(const struct net_device *dev)
497 {
498 #ifdef CONFIG_CHELSIO_T4_DCB
499 struct port_info *pi = netdev_priv(dev);
500
501 if (!pi->dcb.enabled)
502 return 0;
503
504 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
505 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
506 #else
507 return 0;
508 #endif
509 }
510 EXPORT_SYMBOL(cxgb4_dcb_enabled);
511
512 #ifdef CONFIG_CHELSIO_T4_DCB
513 /* Handle a Data Center Bridging update message from the firmware. */
514 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
515 {
516 int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
517 struct net_device *dev = adap->port[port];
518 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
519 int new_dcb_enabled;
520
521 cxgb4_dcb_handle_fw_update(adap, pcmd);
522 new_dcb_enabled = cxgb4_dcb_enabled(dev);
523
524 /* If the DCB has become enabled or disabled on the port then we're
525 * going to need to set up/tear down DCB Priority parameters for the
526 * TX Queues associated with the port.
527 */
528 if (new_dcb_enabled != old_dcb_enabled)
529 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
530 }
531 #endif /* CONFIG_CHELSIO_T4_DCB */
532
533 /* Clear a filter and release any of its resources that we own. This also
534 * clears the filter's "pending" status.
535 */
536 static void clear_filter(struct adapter *adap, struct filter_entry *f)
537 {
538 /* If the new or old filter has loopback rewriting rules then we'll
539 * need to free any existing Layer Two Table (L2T) entries of the old
540 * filter rule. The firmware will handle freeing up any Source MAC
541 * Table (SMT) entries used for rewriting Source MAC Addresses in
542 * loopback rules.
543 */
544 if (f->l2t)
545 cxgb4_l2t_release(f->l2t);
546
547 /* The zeroing of the filter rule below clears the filter valid,
548 * pending, locked flags, l2t pointer, etc. so it's all we need for
549 * this operation.
550 */
551 memset(f, 0, sizeof(*f));
552 }
553
554 /* Handle a filter write/deletion reply.
555 */
556 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
557 {
558 unsigned int idx = GET_TID(rpl);
559 unsigned int nidx = idx - adap->tids.ftid_base;
560 unsigned int ret;
561 struct filter_entry *f;
562
563 if (idx >= adap->tids.ftid_base && nidx <
564 (adap->tids.nftids + adap->tids.nsftids)) {
565 idx = nidx;
566 ret = TCB_COOKIE_G(rpl->cookie);
567 f = &adap->tids.ftid_tab[idx];
568
569 if (ret == FW_FILTER_WR_FLT_DELETED) {
570 /* Clear the filter when we get confirmation from the
571 * hardware that the filter has been deleted.
572 */
573 clear_filter(adap, f);
574 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
575 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
576 idx);
577 clear_filter(adap, f);
578 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
579 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
580 f->pending = 0; /* asynchronous setup completed */
581 f->valid = 1;
582 } else {
583 /* Something went wrong. Issue a warning about the
584 * problem and clear everything out.
585 */
586 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
587 idx, ret);
588 clear_filter(adap, f);
589 }
590 }
591 }
592
593 /* Response queue handler for the FW event queue.
594 */
595 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
596 const struct pkt_gl *gl)
597 {
598 u8 opcode = ((const struct rss_header *)rsp)->opcode;
599
600 rsp++; /* skip RSS header */
601
602 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
603 */
604 if (unlikely(opcode == CPL_FW4_MSG &&
605 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
606 rsp++;
607 opcode = ((const struct rss_header *)rsp)->opcode;
608 rsp++;
609 if (opcode != CPL_SGE_EGR_UPDATE) {
610 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
611 , opcode);
612 goto out;
613 }
614 }
615
616 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
617 const struct cpl_sge_egr_update *p = (void *)rsp;
618 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
619 struct sge_txq *txq;
620
621 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
622 txq->restarts++;
623 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
624 struct sge_eth_txq *eq;
625
626 eq = container_of(txq, struct sge_eth_txq, q);
627 netif_tx_wake_queue(eq->txq);
628 } else {
629 struct sge_ofld_txq *oq;
630
631 oq = container_of(txq, struct sge_ofld_txq, q);
632 tasklet_schedule(&oq->qresume_tsk);
633 }
634 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
635 const struct cpl_fw6_msg *p = (void *)rsp;
636
637 #ifdef CONFIG_CHELSIO_T4_DCB
638 const struct fw_port_cmd *pcmd = (const void *)p->data;
639 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
640 unsigned int action =
641 FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
642
643 if (cmd == FW_PORT_CMD &&
644 action == FW_PORT_ACTION_GET_PORT_INFO) {
645 int port = FW_PORT_CMD_PORTID_G(
646 be32_to_cpu(pcmd->op_to_portid));
647 struct net_device *dev = q->adap->port[port];
648 int state_input = ((pcmd->u.info.dcbxdis_pkd &
649 FW_PORT_CMD_DCBXDIS_F)
650 ? CXGB4_DCB_INPUT_FW_DISABLED
651 : CXGB4_DCB_INPUT_FW_ENABLED);
652
653 cxgb4_dcb_state_fsm(dev, state_input);
654 }
655
656 if (cmd == FW_PORT_CMD &&
657 action == FW_PORT_ACTION_L2_DCB_CFG)
658 dcb_rpl(q->adap, pcmd);
659 else
660 #endif
661 if (p->type == 0)
662 t4_handle_fw_rpl(q->adap, p->data);
663 } else if (opcode == CPL_L2T_WRITE_RPL) {
664 const struct cpl_l2t_write_rpl *p = (void *)rsp;
665
666 do_l2t_write_rpl(q->adap, p);
667 } else if (opcode == CPL_SET_TCB_RPL) {
668 const struct cpl_set_tcb_rpl *p = (void *)rsp;
669
670 filter_rpl(q->adap, p);
671 } else
672 dev_err(q->adap->pdev_dev,
673 "unexpected CPL %#x on FW event queue\n", opcode);
674 out:
675 return 0;
676 }
677
678 /**
679 * uldrx_handler - response queue handler for ULD queues
680 * @q: the response queue that received the packet
681 * @rsp: the response queue descriptor holding the offload message
682 * @gl: the gather list of packet fragments
683 *
684 * Deliver an ingress offload packet to a ULD. All processing is done by
685 * the ULD, we just maintain statistics.
686 */
687 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
688 const struct pkt_gl *gl)
689 {
690 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
691
692 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
693 */
694 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
695 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
696 rsp += 2;
697
698 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
699 rxq->stats.nomem++;
700 return -1;
701 }
702 if (gl == NULL)
703 rxq->stats.imm++;
704 else if (gl == CXGB4_MSG_AN)
705 rxq->stats.an++;
706 else
707 rxq->stats.pkts++;
708 return 0;
709 }
710
711 static void disable_msi(struct adapter *adapter)
712 {
713 if (adapter->flags & USING_MSIX) {
714 pci_disable_msix(adapter->pdev);
715 adapter->flags &= ~USING_MSIX;
716 } else if (adapter->flags & USING_MSI) {
717 pci_disable_msi(adapter->pdev);
718 adapter->flags &= ~USING_MSI;
719 }
720 }
721
722 /*
723 * Interrupt handler for non-data events used with MSI-X.
724 */
725 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
726 {
727 struct adapter *adap = cookie;
728 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
729
730 if (v & PFSW_F) {
731 adap->swintr = 1;
732 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
733 }
734 t4_slow_intr_handler(adap);
735 return IRQ_HANDLED;
736 }
737
738 /*
739 * Name the MSI-X interrupts.
740 */
741 static void name_msix_vecs(struct adapter *adap)
742 {
743 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
744
745 /* non-data interrupts */
746 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
747
748 /* FW events */
749 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
750 adap->port[0]->name);
751
752 /* Ethernet queues */
753 for_each_port(adap, j) {
754 struct net_device *d = adap->port[j];
755 const struct port_info *pi = netdev_priv(d);
756
757 for (i = 0; i < pi->nqsets; i++, msi_idx++)
758 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
759 d->name, i);
760 }
761
762 /* offload queues */
763 for_each_ofldrxq(&adap->sge, i)
764 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
765 adap->port[0]->name, i);
766
767 for_each_rdmarxq(&adap->sge, i)
768 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
769 adap->port[0]->name, i);
770
771 for_each_rdmaciq(&adap->sge, i)
772 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
773 adap->port[0]->name, i);
774 }
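/* Editorial example of the resulting msix_info[] descriptions, assuming a
 * hypothetical 2-port adapter named eth0/eth1 with two queue sets per port
 * and one offload, RDMA and RDMA-CIQ queue each:
 *
 *	[0] "eth0"            (non-data interrupts)
 *	[1] "eth0-FWeventq"
 *	[2] "eth0-Rx0"   [3] "eth0-Rx1"
 *	[4] "eth1-Rx0"   [5] "eth1-Rx1"
 *	[6] "eth0-ofld0" [7] "eth0-rdma0"  [8] "eth0-rdma-ciq0"
 */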
775
776 static int request_msix_queue_irqs(struct adapter *adap)
777 {
778 struct sge *s = &adap->sge;
779 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
780 int msi_index = 2;
781
782 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
783 adap->msix_info[1].desc, &s->fw_evtq);
784 if (err)
785 return err;
786
787 for_each_ethrxq(s, ethqidx) {
788 err = request_irq(adap->msix_info[msi_index].vec,
789 t4_sge_intr_msix, 0,
790 adap->msix_info[msi_index].desc,
791 &s->ethrxq[ethqidx].rspq);
792 if (err)
793 goto unwind;
794 msi_index++;
795 }
796 for_each_ofldrxq(s, ofldqidx) {
797 err = request_irq(adap->msix_info[msi_index].vec,
798 t4_sge_intr_msix, 0,
799 adap->msix_info[msi_index].desc,
800 &s->ofldrxq[ofldqidx].rspq);
801 if (err)
802 goto unwind;
803 msi_index++;
804 }
805 for_each_rdmarxq(s, rdmaqidx) {
806 err = request_irq(adap->msix_info[msi_index].vec,
807 t4_sge_intr_msix, 0,
808 adap->msix_info[msi_index].desc,
809 &s->rdmarxq[rdmaqidx].rspq);
810 if (err)
811 goto unwind;
812 msi_index++;
813 }
814 for_each_rdmaciq(s, rdmaciqqidx) {
815 err = request_irq(adap->msix_info[msi_index].vec,
816 t4_sge_intr_msix, 0,
817 adap->msix_info[msi_index].desc,
818 &s->rdmaciq[rdmaciqqidx].rspq);
819 if (err)
820 goto unwind;
821 msi_index++;
822 }
823 return 0;
824
825 unwind:
826 while (--rdmaciqqidx >= 0)
827 free_irq(adap->msix_info[--msi_index].vec,
828 &s->rdmaciq[rdmaciqqidx].rspq);
829 while (--rdmaqidx >= 0)
830 free_irq(adap->msix_info[--msi_index].vec,
831 &s->rdmarxq[rdmaqidx].rspq);
832 while (--ofldqidx >= 0)
833 free_irq(adap->msix_info[--msi_index].vec,
834 &s->ofldrxq[ofldqidx].rspq);
835 while (--ethqidx >= 0)
836 free_irq(adap->msix_info[--msi_index].vec,
837 &s->ethrxq[ethqidx].rspq);
838 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
839 return err;
840 }
841
842 static void free_msix_queue_irqs(struct adapter *adap)
843 {
844 int i, msi_index = 2;
845 struct sge *s = &adap->sge;
846
847 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
848 for_each_ethrxq(s, i)
849 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
850 for_each_ofldrxq(s, i)
851 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
852 for_each_rdmarxq(s, i)
853 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
854 for_each_rdmaciq(s, i)
855 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
856 }
857
858 /**
859 * write_rss - write the RSS table for a given port
860 * @pi: the port
861 * @queues: array of queue indices for RSS
862 *
863 * Sets up the portion of the HW RSS table for the port's VI to distribute
864 * packets to the Rx queues in @queues.
865 */
866 static int write_rss(const struct port_info *pi, const u16 *queues)
867 {
868 u16 *rss;
869 int i, err;
870 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
871
872 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
873 if (!rss)
874 return -ENOMEM;
875
876 /* map the queue indices to queue ids */
877 for (i = 0; i < pi->rss_size; i++, queues++)
878 rss[i] = q[*queues].rspq.abs_id;
879
880 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
881 pi->rss_size, rss, pi->rss_size);
882 kfree(rss);
883 return err;
884 }
885
886 /**
887 * setup_rss - configure RSS
888 * @adap: the adapter
889 *
890 * Sets up RSS for each port.
891 */
892 static int setup_rss(struct adapter *adap)
893 {
894 int i, err;
895
896 for_each_port(adap, i) {
897 const struct port_info *pi = adap2pinfo(adap, i);
898
899 err = write_rss(pi, pi->rss);
900 if (err)
901 return err;
902 }
903 return 0;
904 }
905
906 /*
907 * Return the channel of the ingress queue with the given qid.
908 */
909 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
910 {
911 qid -= p->ingr_start;
912 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
913 }
914
915 /*
916 * Wait until all NAPI handlers are descheduled.
917 */
918 static void quiesce_rx(struct adapter *adap)
919 {
920 int i;
921
922 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
923 struct sge_rspq *q = adap->sge.ingr_map[i];
924
925 if (q && q->handler)
926 napi_disable(&q->napi);
927 }
928 }
929
930 /*
931 * Enable NAPI scheduling and interrupt generation for all Rx queues.
932 */
933 static void enable_rx(struct adapter *adap)
934 {
935 int i;
936
937 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
938 struct sge_rspq *q = adap->sge.ingr_map[i];
939
940 if (!q)
941 continue;
942 if (q->handler)
943 napi_enable(&q->napi);
944 /* 0-increment GTS to start the timer and enable interrupts */
945 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
946 SEINTARM_V(q->intr_params) |
947 INGRESSQID_V(q->cntxt_id));
948 }
949 }
950
951 /**
952 * setup_sge_queues - configure SGE Tx/Rx/response queues
953 * @adap: the adapter
954 *
955 * Determines how many sets of SGE queues to use and initializes them.
956 * We support multiple queue sets per port if we have MSI-X, otherwise
957 * just one queue set per port.
958 */
959 static int setup_sge_queues(struct adapter *adap)
960 {
961 int err, msi_idx, i, j;
962 struct sge *s = &adap->sge;
963
964 bitmap_zero(s->starving_fl, MAX_EGRQ);
965 bitmap_zero(s->txq_maperr, MAX_EGRQ);
966
967 if (adap->flags & USING_MSIX)
968 msi_idx = 1; /* vector 0 is for non-queue interrupts */
969 else {
970 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
971 NULL, NULL);
972 if (err)
973 return err;
974 msi_idx = -((int)s->intrq.abs_id + 1);
975 }
976
977 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
978 msi_idx, NULL, fwevtq_handler);
979 if (err) {
980 freeout: t4_free_sge_resources(adap);
981 return err;
982 }
983
984 for_each_port(adap, i) {
985 struct net_device *dev = adap->port[i];
986 struct port_info *pi = netdev_priv(dev);
987 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
988 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
989
990 for (j = 0; j < pi->nqsets; j++, q++) {
991 if (msi_idx > 0)
992 msi_idx++;
993 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
994 msi_idx, &q->fl,
995 t4_ethrx_handler);
996 if (err)
997 goto freeout;
998 q->rspq.idx = j;
999 memset(&q->stats, 0, sizeof(q->stats));
1000 }
1001 for (j = 0; j < pi->nqsets; j++, t++) {
1002 err = t4_sge_alloc_eth_txq(adap, t, dev,
1003 netdev_get_tx_queue(dev, j),
1004 s->fw_evtq.cntxt_id);
1005 if (err)
1006 goto freeout;
1007 }
1008 }
1009
1010 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1011 for_each_ofldrxq(s, i) {
1012 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1013 struct net_device *dev = adap->port[i / j];
1014
1015 if (msi_idx > 0)
1016 msi_idx++;
1017 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1018 q->fl.size ? &q->fl : NULL,
1019 uldrx_handler);
1020 if (err)
1021 goto freeout;
1022 memset(&q->stats, 0, sizeof(q->stats));
1023 s->ofld_rxq[i] = q->rspq.abs_id;
1024 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1025 s->fw_evtq.cntxt_id);
1026 if (err)
1027 goto freeout;
1028 }
1029
1030 for_each_rdmarxq(s, i) {
1031 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1032
1033 if (msi_idx > 0)
1034 msi_idx++;
1035 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1036 msi_idx, q->fl.size ? &q->fl : NULL,
1037 uldrx_handler);
1038 if (err)
1039 goto freeout;
1040 memset(&q->stats, 0, sizeof(q->stats));
1041 s->rdma_rxq[i] = q->rspq.abs_id;
1042 }
1043
1044 for_each_rdmaciq(s, i) {
1045 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1046
1047 if (msi_idx > 0)
1048 msi_idx++;
1049 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1050 msi_idx, q->fl.size ? &q->fl : NULL,
1051 uldrx_handler);
1052 if (err)
1053 goto freeout;
1054 memset(&q->stats, 0, sizeof(q->stats));
1055 s->rdma_ciq[i] = q->rspq.abs_id;
1056 }
1057
1058 for_each_port(adap, i) {
1059 /*
1060 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1061 * have RDMA queues, and that's the right value.
1062 */
1063 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1064 s->fw_evtq.cntxt_id,
1065 s->rdmarxq[i].rspq.cntxt_id);
1066 if (err)
1067 goto freeout;
1068 }
1069
1070 t4_write_reg(adap, is_t4(adap->params.chip) ?
1071 MPS_TRC_RSS_CONTROL_A :
1072 MPS_T5_TRC_RSS_CONTROL_A,
1073 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1074 QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1075 return 0;
1076 }
1077
1078 /*
1079 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1080 * The allocated memory is cleared.
1081 */
1082 void *t4_alloc_mem(size_t size)
1083 {
1084 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1085
1086 if (!p)
1087 p = vzalloc(size);
1088 return p;
1089 }
1090
1091 /*
1092 * Free memory allocated through t4_alloc_mem().
1093 */
1094 void t4_free_mem(void *addr)
1095 {
1096 if (is_vmalloc_addr(addr))
1097 vfree(addr);
1098 else
1099 kfree(addr);
1100 }
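/* Illustrative usage only ("struct foo", "tab" and "nentries" are
 * hypothetical names, not from this driver): a caller needing a large,
 * zeroed table would typically do
 *
 *	struct foo *tab = t4_alloc_mem(nentries * sizeof(*tab));
 *	if (!tab)
 *		return -ENOMEM;
 *	...
 *	t4_free_mem(tab);
 *
 * t4_free_mem() picks vfree() or kfree() depending on how the block was
 * actually allocated.
 */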
1101
1102 /* Send a Work Request to write the filter at a specified index. We construct
1103 * a Firmware Filter Work Request to have the work done and put the indicated
1104 * filter into "pending" mode which will prevent any further actions against
1105 * it till we get a reply from the firmware on the completion status of the
1106 * request.
1107 */
1108 static int set_filter_wr(struct adapter *adapter, int fidx)
1109 {
1110 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1111 struct sk_buff *skb;
1112 struct fw_filter_wr *fwr;
1113 unsigned int ftid;
1114
1115 /* If the new filter requires loopback Destination MAC and/or VLAN
1116 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1117 * the filter.
1118 */
1119 if (f->fs.newdmac || f->fs.newvlan) {
1120 /* allocate L2T entry for new filter */
1121 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1122 if (f->l2t == NULL)
1123 return -EAGAIN;
1124 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1125 f->fs.eport, f->fs.dmac)) {
1126 cxgb4_l2t_release(f->l2t);
1127 f->l2t = NULL;
1128 return -ENOMEM;
1129 }
1130 }
1131
1132 ftid = adapter->tids.ftid_base + fidx;
1133
1134 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1135 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1136 memset(fwr, 0, sizeof(*fwr));
1137
1138 /* It would be nice to put most of the following in t4_hw.c but most
1139 * of the work is translating the cxgbtool ch_filter_specification
1140 * into the Work Request and the definition of that structure is
1141 * currently in cxgbtool.h which isn't appropriate to pull into the
1142 * common code. We may eventually try to come up with a more neutral
1143 * filter specification structure but for now it's easiest to simply
1144 * put this fairly direct code in line ...
1145 */
1146 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
1147 fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
1148 fwr->tid_to_iq =
1149 htonl(FW_FILTER_WR_TID_V(ftid) |
1150 FW_FILTER_WR_RQTYPE_V(f->fs.type) |
1151 FW_FILTER_WR_NOREPLY_V(0) |
1152 FW_FILTER_WR_IQ_V(f->fs.iq));
1153 fwr->del_filter_to_l2tix =
1154 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
1155 FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
1156 FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
1157 FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
1158 FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
1159 FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
1160 FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
1161 FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
1162 FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
1163 f->fs.newvlan == VLAN_REWRITE) |
1164 FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
1165 f->fs.newvlan == VLAN_REWRITE) |
1166 FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
1167 FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
1168 FW_FILTER_WR_PRIO_V(f->fs.prio) |
1169 FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
1170 fwr->ethtype = htons(f->fs.val.ethtype);
1171 fwr->ethtypem = htons(f->fs.mask.ethtype);
1172 fwr->frag_to_ovlan_vldm =
1173 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
1174 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
1175 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
1176 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
1177 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
1178 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
1179 fwr->smac_sel = 0;
1180 fwr->rx_chan_rx_rpl_iq =
1181 htons(FW_FILTER_WR_RX_CHAN_V(0) |
1182 FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
1183 fwr->maci_to_matchtypem =
1184 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
1185 FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
1186 FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
1187 FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
1188 FW_FILTER_WR_PORT_V(f->fs.val.iport) |
1189 FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
1190 FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
1191 FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
1192 fwr->ptcl = f->fs.val.proto;
1193 fwr->ptclm = f->fs.mask.proto;
1194 fwr->ttyp = f->fs.val.tos;
1195 fwr->ttypm = f->fs.mask.tos;
1196 fwr->ivlan = htons(f->fs.val.ivlan);
1197 fwr->ivlanm = htons(f->fs.mask.ivlan);
1198 fwr->ovlan = htons(f->fs.val.ovlan);
1199 fwr->ovlanm = htons(f->fs.mask.ovlan);
1200 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1201 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1202 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1203 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1204 fwr->lp = htons(f->fs.val.lport);
1205 fwr->lpm = htons(f->fs.mask.lport);
1206 fwr->fp = htons(f->fs.val.fport);
1207 fwr->fpm = htons(f->fs.mask.fport);
1208 if (f->fs.newsmac)
1209 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1210
1211 /* Mark the filter as "pending" and ship off the Filter Work Request.
1212 * When we get the Work Request Reply we'll clear the pending status.
1213 */
1214 f->pending = 1;
1215 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1216 t4_ofld_send(adapter, skb);
1217 return 0;
1218 }
1219
1220 /* Delete the filter at a specified index.
1221 */
1222 static int del_filter_wr(struct adapter *adapter, int fidx)
1223 {
1224 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1225 struct sk_buff *skb;
1226 struct fw_filter_wr *fwr;
1227 unsigned int len, ftid;
1228
1229 len = sizeof(*fwr);
1230 ftid = adapter->tids.ftid_base + fidx;
1231
1232 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1233 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1234 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1235
1236 /* Mark the filter as "pending" and ship off the Filter Work Request.
1237 * When we get the Work Request Reply we'll clear the pending status.
1238 */
1239 f->pending = 1;
1240 t4_mgmt_tx(adapter, skb);
1241 return 0;
1242 }
1243
1244 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1245 void *accel_priv, select_queue_fallback_t fallback)
1246 {
1247 int txq;
1248
1249 #ifdef CONFIG_CHELSIO_T4_DCB
1250 /* If Data Center Bridging has been successfully negotiated on this
1251 * link then we'll use the skb's priority to map it to a TX Queue.
1252 * The skb's priority is determined via the VLAN Tag Priority Code
1253 * Point field.
1254 */
1255 if (cxgb4_dcb_enabled(dev)) {
1256 u16 vlan_tci;
1257 int err;
1258
1259 err = vlan_get_tag(skb, &vlan_tci);
1260 if (unlikely(err)) {
1261 if (net_ratelimit())
1262 netdev_warn(dev,
1263 "TX Packet without VLAN Tag on DCB Link\n");
1264 txq = 0;
1265 } else {
1266 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1267 }
1268 return txq;
1269 }
1270 #endif /* CONFIG_CHELSIO_T4_DCB */
1271
1272 if (select_queue) {
1273 txq = (skb_rx_queue_recorded(skb)
1274 ? skb_get_rx_queue(skb)
1275 : smp_processor_id());
1276
1277 while (unlikely(txq >= dev->real_num_tx_queues))
1278 txq -= dev->real_num_tx_queues;
1279
1280 return txq;
1281 }
1282
1283 return fallback(dev, skb) % dev->real_num_tx_queues;
1284 }
1285
1286 static inline int is_offload(const struct adapter *adap)
1287 {
1288 return adap->params.offload;
1289 }
1290
1291 /*
1292 * Implementation of ethtool operations.
1293 */
1294
1295 static u32 get_msglevel(struct net_device *dev)
1296 {
1297 return netdev2adap(dev)->msg_enable;
1298 }
1299
1300 static void set_msglevel(struct net_device *dev, u32 val)
1301 {
1302 netdev2adap(dev)->msg_enable = val;
1303 }
1304
1305 static char stats_strings[][ETH_GSTRING_LEN] = {
1306 "TxOctetsOK ",
1307 "TxFramesOK ",
1308 "TxBroadcastFrames ",
1309 "TxMulticastFrames ",
1310 "TxUnicastFrames ",
1311 "TxErrorFrames ",
1312
1313 "TxFrames64 ",
1314 "TxFrames65To127 ",
1315 "TxFrames128To255 ",
1316 "TxFrames256To511 ",
1317 "TxFrames512To1023 ",
1318 "TxFrames1024To1518 ",
1319 "TxFrames1519ToMax ",
1320
1321 "TxFramesDropped ",
1322 "TxPauseFrames ",
1323 "TxPPP0Frames ",
1324 "TxPPP1Frames ",
1325 "TxPPP2Frames ",
1326 "TxPPP3Frames ",
1327 "TxPPP4Frames ",
1328 "TxPPP5Frames ",
1329 "TxPPP6Frames ",
1330 "TxPPP7Frames ",
1331
1332 "RxOctetsOK ",
1333 "RxFramesOK ",
1334 "RxBroadcastFrames ",
1335 "RxMulticastFrames ",
1336 "RxUnicastFrames ",
1337
1338 "RxFramesTooLong ",
1339 "RxJabberErrors ",
1340 "RxFCSErrors ",
1341 "RxLengthErrors ",
1342 "RxSymbolErrors ",
1343 "RxRuntFrames ",
1344
1345 "RxFrames64 ",
1346 "RxFrames65To127 ",
1347 "RxFrames128To255 ",
1348 "RxFrames256To511 ",
1349 "RxFrames512To1023 ",
1350 "RxFrames1024To1518 ",
1351 "RxFrames1519ToMax ",
1352
1353 "RxPauseFrames ",
1354 "RxPPP0Frames ",
1355 "RxPPP1Frames ",
1356 "RxPPP2Frames ",
1357 "RxPPP3Frames ",
1358 "RxPPP4Frames ",
1359 "RxPPP5Frames ",
1360 "RxPPP6Frames ",
1361 "RxPPP7Frames ",
1362
1363 "RxBG0FramesDropped ",
1364 "RxBG1FramesDropped ",
1365 "RxBG2FramesDropped ",
1366 "RxBG3FramesDropped ",
1367 "RxBG0FramesTrunc ",
1368 "RxBG1FramesTrunc ",
1369 "RxBG2FramesTrunc ",
1370 "RxBG3FramesTrunc ",
1371
1372 "TSO ",
1373 "TxCsumOffload ",
1374 "RxCsumGood ",
1375 "VLANextractions ",
1376 "VLANinsertions ",
1377 "GROpackets ",
1378 "GROmerged ",
1379 "WriteCoalSuccess ",
1380 "WriteCoalFail ",
1381 };
1382
1383 static int get_sset_count(struct net_device *dev, int sset)
1384 {
1385 switch (sset) {
1386 case ETH_SS_STATS:
1387 return ARRAY_SIZE(stats_strings);
1388 default:
1389 return -EOPNOTSUPP;
1390 }
1391 }
1392
1393 #define T4_REGMAP_SIZE (160 * 1024)
1394 #define T5_REGMAP_SIZE (332 * 1024)
1395
1396 static int get_regs_len(struct net_device *dev)
1397 {
1398 struct adapter *adap = netdev2adap(dev);
1399 if (is_t4(adap->params.chip))
1400 return T4_REGMAP_SIZE;
1401 else
1402 return T5_REGMAP_SIZE;
1403 }
1404
1405 static int get_eeprom_len(struct net_device *dev)
1406 {
1407 return EEPROMSIZE;
1408 }
1409
1410 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1411 {
1412 struct adapter *adapter = netdev2adap(dev);
1413
1414 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1415 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1416 strlcpy(info->bus_info, pci_name(adapter->pdev),
1417 sizeof(info->bus_info));
1418
1419 if (adapter->params.fw_vers)
1420 snprintf(info->fw_version, sizeof(info->fw_version),
1421 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1422 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
1423 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
1424 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
1425 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
1426 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
1427 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
1428 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
1429 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
1430 }
1431
1432 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1433 {
1434 if (stringset == ETH_SS_STATS)
1435 memcpy(data, stats_strings, sizeof(stats_strings));
1436 }
1437
1438 /*
1439 * port stats maintained per queue of the port. They should be in the same
1440 * order as in stats_strings above.
1441 */
1442 struct queue_port_stats {
1443 u64 tso;
1444 u64 tx_csum;
1445 u64 rx_csum;
1446 u64 vlan_ex;
1447 u64 vlan_ins;
1448 u64 gro_pkts;
1449 u64 gro_merged;
1450 };
1451
1452 static void collect_sge_port_stats(const struct adapter *adap,
1453 const struct port_info *p, struct queue_port_stats *s)
1454 {
1455 int i;
1456 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1457 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1458
1459 memset(s, 0, sizeof(*s));
1460 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1461 s->tso += tx->tso;
1462 s->tx_csum += tx->tx_cso;
1463 s->rx_csum += rx->stats.rx_cso;
1464 s->vlan_ex += rx->stats.vlan_ex;
1465 s->vlan_ins += tx->vlan_ins;
1466 s->gro_pkts += rx->stats.lro_pkts;
1467 s->gro_merged += rx->stats.lro_merged;
1468 }
1469 }
1470
1471 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1472 u64 *data)
1473 {
1474 struct port_info *pi = netdev_priv(dev);
1475 struct adapter *adapter = pi->adapter;
1476 u32 val1, val2;
1477
1478 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1479
1480 data += sizeof(struct port_stats) / sizeof(u64);
1481 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1482 data += sizeof(struct queue_port_stats) / sizeof(u64);
1483 if (!is_t4(adapter->params.chip)) {
1484 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
1485 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
1486 val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
1487 *data = val1 - val2;
1488 data++;
1489 *data = val2;
1490 data++;
1491 } else {
1492 memset(data, 0, 2 * sizeof(u64));
1493 data += 2;
1494 }
1495 }
1496
1497 /*
1498 * Return a version number to identify the type of adapter. The scheme is:
1499 * - bits 0..9: chip version
1500 * - bits 10..15: chip revision
1501 * - bits 16..23: register dump version
1502 */
1503 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1504 {
1505 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1506 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1507 }
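/* Editorial decoding sketch, assuming only the bit layout documented above:
 * a consumer of this version word could unpack it as
 *
 *	chip_ver = vers & 0x3ff;           (bits 0..9)
 *	chip_rev = (vers >> 10) & 0x3f;    (bits 10..15)
 *	dump_ver = (vers >> 16) & 0xff;    (bits 16..23)
 *
 * The register dump version emitted here is currently always 1 (the
 * "1 << 16" term above).
 */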
1508
1509 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1510 unsigned int end)
1511 {
1512 u32 *p = buf + start;
1513
1514 for ( ; start <= end; start += sizeof(u32))
1515 *p++ = t4_read_reg(ap, start);
1516 }
1517
1518 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1519 void *buf)
1520 {
1521 static const unsigned int t4_reg_ranges[] = {
1522 0x1008, 0x1108,
1523 0x1180, 0x11b4,
1524 0x11fc, 0x123c,
1525 0x1300, 0x173c,
1526 0x1800, 0x18fc,
1527 0x3000, 0x30d8,
1528 0x30e0, 0x5924,
1529 0x5960, 0x59d4,
1530 0x5a00, 0x5af8,
1531 0x6000, 0x6098,
1532 0x6100, 0x6150,
1533 0x6200, 0x6208,
1534 0x6240, 0x6248,
1535 0x6280, 0x6338,
1536 0x6370, 0x638c,
1537 0x6400, 0x643c,
1538 0x6500, 0x6524,
1539 0x6a00, 0x6a38,
1540 0x6a60, 0x6a78,
1541 0x6b00, 0x6b84,
1542 0x6bf0, 0x6c84,
1543 0x6cf0, 0x6d84,
1544 0x6df0, 0x6e84,
1545 0x6ef0, 0x6f84,
1546 0x6ff0, 0x7084,
1547 0x70f0, 0x7184,
1548 0x71f0, 0x7284,
1549 0x72f0, 0x7384,
1550 0x73f0, 0x7450,
1551 0x7500, 0x7530,
1552 0x7600, 0x761c,
1553 0x7680, 0x76cc,
1554 0x7700, 0x7798,
1555 0x77c0, 0x77fc,
1556 0x7900, 0x79fc,
1557 0x7b00, 0x7c38,
1558 0x7d00, 0x7efc,
1559 0x8dc0, 0x8e1c,
1560 0x8e30, 0x8e78,
1561 0x8ea0, 0x8f6c,
1562 0x8fc0, 0x9074,
1563 0x90fc, 0x90fc,
1564 0x9400, 0x9458,
1565 0x9600, 0x96bc,
1566 0x9800, 0x9808,
1567 0x9820, 0x983c,
1568 0x9850, 0x9864,
1569 0x9c00, 0x9c6c,
1570 0x9c80, 0x9cec,
1571 0x9d00, 0x9d6c,
1572 0x9d80, 0x9dec,
1573 0x9e00, 0x9e6c,
1574 0x9e80, 0x9eec,
1575 0x9f00, 0x9f6c,
1576 0x9f80, 0x9fec,
1577 0xd004, 0xd03c,
1578 0xdfc0, 0xdfe0,
1579 0xe000, 0xea7c,
1580 0xf000, 0x11110,
1581 0x11118, 0x11190,
1582 0x19040, 0x1906c,
1583 0x19078, 0x19080,
1584 0x1908c, 0x19124,
1585 0x19150, 0x191b0,
1586 0x191d0, 0x191e8,
1587 0x19238, 0x1924c,
1588 0x193f8, 0x19474,
1589 0x19490, 0x194f8,
1590 0x19800, 0x19f30,
1591 0x1a000, 0x1a06c,
1592 0x1a0b0, 0x1a120,
1593 0x1a128, 0x1a138,
1594 0x1a190, 0x1a1c4,
1595 0x1a1fc, 0x1a1fc,
1596 0x1e040, 0x1e04c,
1597 0x1e284, 0x1e28c,
1598 0x1e2c0, 0x1e2c0,
1599 0x1e2e0, 0x1e2e0,
1600 0x1e300, 0x1e384,
1601 0x1e3c0, 0x1e3c8,
1602 0x1e440, 0x1e44c,
1603 0x1e684, 0x1e68c,
1604 0x1e6c0, 0x1e6c0,
1605 0x1e6e0, 0x1e6e0,
1606 0x1e700, 0x1e784,
1607 0x1e7c0, 0x1e7c8,
1608 0x1e840, 0x1e84c,
1609 0x1ea84, 0x1ea8c,
1610 0x1eac0, 0x1eac0,
1611 0x1eae0, 0x1eae0,
1612 0x1eb00, 0x1eb84,
1613 0x1ebc0, 0x1ebc8,
1614 0x1ec40, 0x1ec4c,
1615 0x1ee84, 0x1ee8c,
1616 0x1eec0, 0x1eec0,
1617 0x1eee0, 0x1eee0,
1618 0x1ef00, 0x1ef84,
1619 0x1efc0, 0x1efc8,
1620 0x1f040, 0x1f04c,
1621 0x1f284, 0x1f28c,
1622 0x1f2c0, 0x1f2c0,
1623 0x1f2e0, 0x1f2e0,
1624 0x1f300, 0x1f384,
1625 0x1f3c0, 0x1f3c8,
1626 0x1f440, 0x1f44c,
1627 0x1f684, 0x1f68c,
1628 0x1f6c0, 0x1f6c0,
1629 0x1f6e0, 0x1f6e0,
1630 0x1f700, 0x1f784,
1631 0x1f7c0, 0x1f7c8,
1632 0x1f840, 0x1f84c,
1633 0x1fa84, 0x1fa8c,
1634 0x1fac0, 0x1fac0,
1635 0x1fae0, 0x1fae0,
1636 0x1fb00, 0x1fb84,
1637 0x1fbc0, 0x1fbc8,
1638 0x1fc40, 0x1fc4c,
1639 0x1fe84, 0x1fe8c,
1640 0x1fec0, 0x1fec0,
1641 0x1fee0, 0x1fee0,
1642 0x1ff00, 0x1ff84,
1643 0x1ffc0, 0x1ffc8,
1644 0x20000, 0x2002c,
1645 0x20100, 0x2013c,
1646 0x20190, 0x201c8,
1647 0x20200, 0x20318,
1648 0x20400, 0x20528,
1649 0x20540, 0x20614,
1650 0x21000, 0x21040,
1651 0x2104c, 0x21060,
1652 0x210c0, 0x210ec,
1653 0x21200, 0x21268,
1654 0x21270, 0x21284,
1655 0x212fc, 0x21388,
1656 0x21400, 0x21404,
1657 0x21500, 0x21518,
1658 0x2152c, 0x2153c,
1659 0x21550, 0x21554,
1660 0x21600, 0x21600,
1661 0x21608, 0x21628,
1662 0x21630, 0x2163c,
1663 0x21700, 0x2171c,
1664 0x21780, 0x2178c,
1665 0x21800, 0x21c38,
1666 0x21c80, 0x21d7c,
1667 0x21e00, 0x21e04,
1668 0x22000, 0x2202c,
1669 0x22100, 0x2213c,
1670 0x22190, 0x221c8,
1671 0x22200, 0x22318,
1672 0x22400, 0x22528,
1673 0x22540, 0x22614,
1674 0x23000, 0x23040,
1675 0x2304c, 0x23060,
1676 0x230c0, 0x230ec,
1677 0x23200, 0x23268,
1678 0x23270, 0x23284,
1679 0x232fc, 0x23388,
1680 0x23400, 0x23404,
1681 0x23500, 0x23518,
1682 0x2352c, 0x2353c,
1683 0x23550, 0x23554,
1684 0x23600, 0x23600,
1685 0x23608, 0x23628,
1686 0x23630, 0x2363c,
1687 0x23700, 0x2371c,
1688 0x23780, 0x2378c,
1689 0x23800, 0x23c38,
1690 0x23c80, 0x23d7c,
1691 0x23e00, 0x23e04,
1692 0x24000, 0x2402c,
1693 0x24100, 0x2413c,
1694 0x24190, 0x241c8,
1695 0x24200, 0x24318,
1696 0x24400, 0x24528,
1697 0x24540, 0x24614,
1698 0x25000, 0x25040,
1699 0x2504c, 0x25060,
1700 0x250c0, 0x250ec,
1701 0x25200, 0x25268,
1702 0x25270, 0x25284,
1703 0x252fc, 0x25388,
1704 0x25400, 0x25404,
1705 0x25500, 0x25518,
1706 0x2552c, 0x2553c,
1707 0x25550, 0x25554,
1708 0x25600, 0x25600,
1709 0x25608, 0x25628,
1710 0x25630, 0x2563c,
1711 0x25700, 0x2571c,
1712 0x25780, 0x2578c,
1713 0x25800, 0x25c38,
1714 0x25c80, 0x25d7c,
1715 0x25e00, 0x25e04,
1716 0x26000, 0x2602c,
1717 0x26100, 0x2613c,
1718 0x26190, 0x261c8,
1719 0x26200, 0x26318,
1720 0x26400, 0x26528,
1721 0x26540, 0x26614,
1722 0x27000, 0x27040,
1723 0x2704c, 0x27060,
1724 0x270c0, 0x270ec,
1725 0x27200, 0x27268,
1726 0x27270, 0x27284,
1727 0x272fc, 0x27388,
1728 0x27400, 0x27404,
1729 0x27500, 0x27518,
1730 0x2752c, 0x2753c,
1731 0x27550, 0x27554,
1732 0x27600, 0x27600,
1733 0x27608, 0x27628,
1734 0x27630, 0x2763c,
1735 0x27700, 0x2771c,
1736 0x27780, 0x2778c,
1737 0x27800, 0x27c38,
1738 0x27c80, 0x27d7c,
1739 0x27e00, 0x27e04
1740 };
1741
1742 static const unsigned int t5_reg_ranges[] = {
1743 0x1008, 0x1148,
1744 0x1180, 0x11b4,
1745 0x11fc, 0x123c,
1746 0x1280, 0x173c,
1747 0x1800, 0x18fc,
1748 0x3000, 0x3028,
1749 0x3060, 0x30d8,
1750 0x30e0, 0x30fc,
1751 0x3140, 0x357c,
1752 0x35a8, 0x35cc,
1753 0x35ec, 0x35ec,
1754 0x3600, 0x5624,
1755 0x56cc, 0x575c,
1756 0x580c, 0x5814,
1757 0x5890, 0x58bc,
1758 0x5940, 0x59dc,
1759 0x59fc, 0x5a18,
1760 0x5a60, 0x5a9c,
1761 0x5b9c, 0x5bfc,
1762 0x6000, 0x6040,
1763 0x6058, 0x614c,
1764 0x7700, 0x7798,
1765 0x77c0, 0x78fc,
1766 0x7b00, 0x7c54,
1767 0x7d00, 0x7efc,
1768 0x8dc0, 0x8de0,
1769 0x8df8, 0x8e84,
1770 0x8ea0, 0x8f84,
1771 0x8fc0, 0x90f8,
1772 0x9400, 0x9470,
1773 0x9600, 0x96f4,
1774 0x9800, 0x9808,
1775 0x9820, 0x983c,
1776 0x9850, 0x9864,
1777 0x9c00, 0x9c6c,
1778 0x9c80, 0x9cec,
1779 0x9d00, 0x9d6c,
1780 0x9d80, 0x9dec,
1781 0x9e00, 0x9e6c,
1782 0x9e80, 0x9eec,
1783 0x9f00, 0x9f6c,
1784 0x9f80, 0xa020,
1785 0xd004, 0xd03c,
1786 0xdfc0, 0xdfe0,
1787 0xe000, 0x11088,
1788 0x1109c, 0x11110,
1789 0x11118, 0x1117c,
1790 0x11190, 0x11204,
1791 0x19040, 0x1906c,
1792 0x19078, 0x19080,
1793 0x1908c, 0x19124,
1794 0x19150, 0x191b0,
1795 0x191d0, 0x191e8,
1796 0x19238, 0x19290,
1797 0x193f8, 0x19474,
1798 0x19490, 0x194cc,
1799 0x194f0, 0x194f8,
1800 0x19c00, 0x19c60,
1801 0x19c94, 0x19e10,
1802 0x19e50, 0x19f34,
1803 0x19f40, 0x19f50,
1804 0x19f90, 0x19fe4,
1805 0x1a000, 0x1a06c,
1806 0x1a0b0, 0x1a120,
1807 0x1a128, 0x1a138,
1808 0x1a190, 0x1a1c4,
1809 0x1a1fc, 0x1a1fc,
1810 0x1e008, 0x1e00c,
1811 0x1e040, 0x1e04c,
1812 0x1e284, 0x1e290,
1813 0x1e2c0, 0x1e2c0,
1814 0x1e2e0, 0x1e2e0,
1815 0x1e300, 0x1e384,
1816 0x1e3c0, 0x1e3c8,
1817 0x1e408, 0x1e40c,
1818 0x1e440, 0x1e44c,
1819 0x1e684, 0x1e690,
1820 0x1e6c0, 0x1e6c0,
1821 0x1e6e0, 0x1e6e0,
1822 0x1e700, 0x1e784,
1823 0x1e7c0, 0x1e7c8,
1824 0x1e808, 0x1e80c,
1825 0x1e840, 0x1e84c,
1826 0x1ea84, 0x1ea90,
1827 0x1eac0, 0x1eac0,
1828 0x1eae0, 0x1eae0,
1829 0x1eb00, 0x1eb84,
1830 0x1ebc0, 0x1ebc8,
1831 0x1ec08, 0x1ec0c,
1832 0x1ec40, 0x1ec4c,
1833 0x1ee84, 0x1ee90,
1834 0x1eec0, 0x1eec0,
1835 0x1eee0, 0x1eee0,
1836 0x1ef00, 0x1ef84,
1837 0x1efc0, 0x1efc8,
1838 0x1f008, 0x1f00c,
1839 0x1f040, 0x1f04c,
1840 0x1f284, 0x1f290,
1841 0x1f2c0, 0x1f2c0,
1842 0x1f2e0, 0x1f2e0,
1843 0x1f300, 0x1f384,
1844 0x1f3c0, 0x1f3c8,
1845 0x1f408, 0x1f40c,
1846 0x1f440, 0x1f44c,
1847 0x1f684, 0x1f690,
1848 0x1f6c0, 0x1f6c0,
1849 0x1f6e0, 0x1f6e0,
1850 0x1f700, 0x1f784,
1851 0x1f7c0, 0x1f7c8,
1852 0x1f808, 0x1f80c,
1853 0x1f840, 0x1f84c,
1854 0x1fa84, 0x1fa90,
1855 0x1fac0, 0x1fac0,
1856 0x1fae0, 0x1fae0,
1857 0x1fb00, 0x1fb84,
1858 0x1fbc0, 0x1fbc8,
1859 0x1fc08, 0x1fc0c,
1860 0x1fc40, 0x1fc4c,
1861 0x1fe84, 0x1fe90,
1862 0x1fec0, 0x1fec0,
1863 0x1fee0, 0x1fee0,
1864 0x1ff00, 0x1ff84,
1865 0x1ffc0, 0x1ffc8,
1866 0x30000, 0x30030,
1867 0x30100, 0x30144,
1868 0x30190, 0x301d0,
1869 0x30200, 0x30318,
1870 0x30400, 0x3052c,
1871 0x30540, 0x3061c,
1872 0x30800, 0x30834,
1873 0x308c0, 0x30908,
1874 0x30910, 0x309ac,
1875 0x30a00, 0x30a04,
1876 0x30a0c, 0x30a2c,
1877 0x30a44, 0x30a50,
1878 0x30a74, 0x30c24,
1879 0x30d08, 0x30d14,
1880 0x30d1c, 0x30d20,
1881 0x30d3c, 0x30d50,
1882 0x31200, 0x3120c,
1883 0x31220, 0x31220,
1884 0x31240, 0x31240,
1885 0x31600, 0x31600,
1886 0x31608, 0x3160c,
1887 0x31a00, 0x31a1c,
1888 0x31e04, 0x31e20,
1889 0x31e38, 0x31e3c,
1890 0x31e80, 0x31e80,
1891 0x31e88, 0x31ea8,
1892 0x31eb0, 0x31eb4,
1893 0x31ec8, 0x31ed4,
1894 0x31fb8, 0x32004,
1895 0x32208, 0x3223c,
1896 0x32600, 0x32630,
1897 0x32a00, 0x32abc,
1898 0x32b00, 0x32b70,
1899 0x33000, 0x33048,
1900 0x33060, 0x3309c,
1901 0x330f0, 0x33148,
1902 0x33160, 0x3319c,
1903 0x331f0, 0x332e4,
1904 0x332f8, 0x333e4,
1905 0x333f8, 0x33448,
1906 0x33460, 0x3349c,
1907 0x334f0, 0x33548,
1908 0x33560, 0x3359c,
1909 0x335f0, 0x336e4,
1910 0x336f8, 0x337e4,
1911 0x337f8, 0x337fc,
1912 0x33814, 0x33814,
1913 0x3382c, 0x3382c,
1914 0x33880, 0x3388c,
1915 0x338e8, 0x338ec,
1916 0x33900, 0x33948,
1917 0x33960, 0x3399c,
1918 0x339f0, 0x33ae4,
1919 0x33af8, 0x33b10,
1920 0x33b28, 0x33b28,
1921 0x33b3c, 0x33b50,
1922 0x33bf0, 0x33c10,
1923 0x33c28, 0x33c28,
1924 0x33c3c, 0x33c50,
1925 0x33cf0, 0x33cfc,
1926 0x34000, 0x34030,
1927 0x34100, 0x34144,
1928 0x34190, 0x341d0,
1929 0x34200, 0x34318,
1930 0x34400, 0x3452c,
1931 0x34540, 0x3461c,
1932 0x34800, 0x34834,
1933 0x348c0, 0x34908,
1934 0x34910, 0x349ac,
1935 0x34a00, 0x34a04,
1936 0x34a0c, 0x34a2c,
1937 0x34a44, 0x34a50,
1938 0x34a74, 0x34c24,
1939 0x34d08, 0x34d14,
1940 0x34d1c, 0x34d20,
1941 0x34d3c, 0x34d50,
1942 0x35200, 0x3520c,
1943 0x35220, 0x35220,
1944 0x35240, 0x35240,
1945 0x35600, 0x35600,
1946 0x35608, 0x3560c,
1947 0x35a00, 0x35a1c,
1948 0x35e04, 0x35e20,
1949 0x35e38, 0x35e3c,
1950 0x35e80, 0x35e80,
1951 0x35e88, 0x35ea8,
1952 0x35eb0, 0x35eb4,
1953 0x35ec8, 0x35ed4,
1954 0x35fb8, 0x36004,
1955 0x36208, 0x3623c,
1956 0x36600, 0x36630,
1957 0x36a00, 0x36abc,
1958 0x36b00, 0x36b70,
1959 0x37000, 0x37048,
1960 0x37060, 0x3709c,
1961 0x370f0, 0x37148,
1962 0x37160, 0x3719c,
1963 0x371f0, 0x372e4,
1964 0x372f8, 0x373e4,
1965 0x373f8, 0x37448,
1966 0x37460, 0x3749c,
1967 0x374f0, 0x37548,
1968 0x37560, 0x3759c,
1969 0x375f0, 0x376e4,
1970 0x376f8, 0x377e4,
1971 0x377f8, 0x377fc,
1972 0x37814, 0x37814,
1973 0x3782c, 0x3782c,
1974 0x37880, 0x3788c,
1975 0x378e8, 0x378ec,
1976 0x37900, 0x37948,
1977 0x37960, 0x3799c,
1978 0x379f0, 0x37ae4,
1979 0x37af8, 0x37b10,
1980 0x37b28, 0x37b28,
1981 0x37b3c, 0x37b50,
1982 0x37bf0, 0x37c10,
1983 0x37c28, 0x37c28,
1984 0x37c3c, 0x37c50,
1985 0x37cf0, 0x37cfc,
1986 0x38000, 0x38030,
1987 0x38100, 0x38144,
1988 0x38190, 0x381d0,
1989 0x38200, 0x38318,
1990 0x38400, 0x3852c,
1991 0x38540, 0x3861c,
1992 0x38800, 0x38834,
1993 0x388c0, 0x38908,
1994 0x38910, 0x389ac,
1995 0x38a00, 0x38a04,
1996 0x38a0c, 0x38a2c,
1997 0x38a44, 0x38a50,
1998 0x38a74, 0x38c24,
1999 0x38d08, 0x38d14,
2000 0x38d1c, 0x38d20,
2001 0x38d3c, 0x38d50,
2002 0x39200, 0x3920c,
2003 0x39220, 0x39220,
2004 0x39240, 0x39240,
2005 0x39600, 0x39600,
2006 0x39608, 0x3960c,
2007 0x39a00, 0x39a1c,
2008 0x39e04, 0x39e20,
2009 0x39e38, 0x39e3c,
2010 0x39e80, 0x39e80,
2011 0x39e88, 0x39ea8,
2012 0x39eb0, 0x39eb4,
2013 0x39ec8, 0x39ed4,
2014 0x39fb8, 0x3a004,
2015 0x3a208, 0x3a23c,
2016 0x3a600, 0x3a630,
2017 0x3aa00, 0x3aabc,
2018 0x3ab00, 0x3ab70,
2019 0x3b000, 0x3b048,
2020 0x3b060, 0x3b09c,
2021 0x3b0f0, 0x3b148,
2022 0x3b160, 0x3b19c,
2023 0x3b1f0, 0x3b2e4,
2024 0x3b2f8, 0x3b3e4,
2025 0x3b3f8, 0x3b448,
2026 0x3b460, 0x3b49c,
2027 0x3b4f0, 0x3b548,
2028 0x3b560, 0x3b59c,
2029 0x3b5f0, 0x3b6e4,
2030 0x3b6f8, 0x3b7e4,
2031 0x3b7f8, 0x3b7fc,
2032 0x3b814, 0x3b814,
2033 0x3b82c, 0x3b82c,
2034 0x3b880, 0x3b88c,
2035 0x3b8e8, 0x3b8ec,
2036 0x3b900, 0x3b948,
2037 0x3b960, 0x3b99c,
2038 0x3b9f0, 0x3bae4,
2039 0x3baf8, 0x3bb10,
2040 0x3bb28, 0x3bb28,
2041 0x3bb3c, 0x3bb50,
2042 0x3bbf0, 0x3bc10,
2043 0x3bc28, 0x3bc28,
2044 0x3bc3c, 0x3bc50,
2045 0x3bcf0, 0x3bcfc,
2046 0x3c000, 0x3c030,
2047 0x3c100, 0x3c144,
2048 0x3c190, 0x3c1d0,
2049 0x3c200, 0x3c318,
2050 0x3c400, 0x3c52c,
2051 0x3c540, 0x3c61c,
2052 0x3c800, 0x3c834,
2053 0x3c8c0, 0x3c908,
2054 0x3c910, 0x3c9ac,
2055 0x3ca00, 0x3ca04,
2056 0x3ca0c, 0x3ca2c,
2057 0x3ca44, 0x3ca50,
2058 0x3ca74, 0x3cc24,
2059 0x3cd08, 0x3cd14,
2060 0x3cd1c, 0x3cd20,
2061 0x3cd3c, 0x3cd50,
2062 0x3d200, 0x3d20c,
2063 0x3d220, 0x3d220,
2064 0x3d240, 0x3d240,
2065 0x3d600, 0x3d600,
2066 0x3d608, 0x3d60c,
2067 0x3da00, 0x3da1c,
2068 0x3de04, 0x3de20,
2069 0x3de38, 0x3de3c,
2070 0x3de80, 0x3de80,
2071 0x3de88, 0x3dea8,
2072 0x3deb0, 0x3deb4,
2073 0x3dec8, 0x3ded4,
2074 0x3dfb8, 0x3e004,
2075 0x3e208, 0x3e23c,
2076 0x3e600, 0x3e630,
2077 0x3ea00, 0x3eabc,
2078 0x3eb00, 0x3eb70,
2079 0x3f000, 0x3f048,
2080 0x3f060, 0x3f09c,
2081 0x3f0f0, 0x3f148,
2082 0x3f160, 0x3f19c,
2083 0x3f1f0, 0x3f2e4,
2084 0x3f2f8, 0x3f3e4,
2085 0x3f3f8, 0x3f448,
2086 0x3f460, 0x3f49c,
2087 0x3f4f0, 0x3f548,
2088 0x3f560, 0x3f59c,
2089 0x3f5f0, 0x3f6e4,
2090 0x3f6f8, 0x3f7e4,
2091 0x3f7f8, 0x3f7fc,
2092 0x3f814, 0x3f814,
2093 0x3f82c, 0x3f82c,
2094 0x3f880, 0x3f88c,
2095 0x3f8e8, 0x3f8ec,
2096 0x3f900, 0x3f948,
2097 0x3f960, 0x3f99c,
2098 0x3f9f0, 0x3fae4,
2099 0x3faf8, 0x3fb10,
2100 0x3fb28, 0x3fb28,
2101 0x3fb3c, 0x3fb50,
2102 0x3fbf0, 0x3fc10,
2103 0x3fc28, 0x3fc28,
2104 0x3fc3c, 0x3fc50,
2105 0x3fcf0, 0x3fcfc,
2106 0x40000, 0x4000c,
2107 0x40040, 0x40068,
2108 0x40080, 0x40144,
2109 0x40180, 0x4018c,
2110 0x40200, 0x40298,
2111 0x402ac, 0x4033c,
2112 0x403f8, 0x403fc,
2113 0x41304, 0x413c4,
2114 0x41400, 0x4141c,
2115 0x41480, 0x414d0,
2116 0x44000, 0x44078,
2117 0x440c0, 0x44278,
2118 0x442c0, 0x44478,
2119 0x444c0, 0x44678,
2120 0x446c0, 0x44878,
2121 0x448c0, 0x449fc,
2122 0x45000, 0x45068,
2123 0x45080, 0x45084,
2124 0x450a0, 0x450b0,
2125 0x45200, 0x45268,
2126 0x45280, 0x45284,
2127 0x452a0, 0x452b0,
2128 0x460c0, 0x460e4,
2129 0x47000, 0x4708c,
2130 0x47200, 0x47250,
2131 0x47400, 0x47420,
2132 0x47600, 0x47618,
2133 0x47800, 0x47814,
2134 0x48000, 0x4800c,
2135 0x48040, 0x48068,
2136 0x48080, 0x48144,
2137 0x48180, 0x4818c,
2138 0x48200, 0x48298,
2139 0x482ac, 0x4833c,
2140 0x483f8, 0x483fc,
2141 0x49304, 0x493c4,
2142 0x49400, 0x4941c,
2143 0x49480, 0x494d0,
2144 0x4c000, 0x4c078,
2145 0x4c0c0, 0x4c278,
2146 0x4c2c0, 0x4c478,
2147 0x4c4c0, 0x4c678,
2148 0x4c6c0, 0x4c878,
2149 0x4c8c0, 0x4c9fc,
2150 0x4d000, 0x4d068,
2151 0x4d080, 0x4d084,
2152 0x4d0a0, 0x4d0b0,
2153 0x4d200, 0x4d268,
2154 0x4d280, 0x4d284,
2155 0x4d2a0, 0x4d2b0,
2156 0x4e0c0, 0x4e0e4,
2157 0x4f000, 0x4f08c,
2158 0x4f200, 0x4f250,
2159 0x4f400, 0x4f420,
2160 0x4f600, 0x4f618,
2161 0x4f800, 0x4f814,
2162 0x50000, 0x500cc,
2163 0x50400, 0x50400,
2164 0x50800, 0x508cc,
2165 0x50c00, 0x50c00,
2166 0x51000, 0x5101c,
2167 0x51300, 0x51308,
2168 };
2169
2170 int i;
2171 struct adapter *ap = netdev2adap(dev);
2172 const unsigned int *reg_ranges;
2173 int arr_size = 0, buf_size = 0;
2174
2175 if (is_t4(ap->params.chip)) {
2176 reg_ranges = &t4_reg_ranges[0];
2177 arr_size = ARRAY_SIZE(t4_reg_ranges);
2178 buf_size = T4_REGMAP_SIZE;
2179 } else {
2180 reg_ranges = &t5_reg_ranges[0];
2181 arr_size = ARRAY_SIZE(t5_reg_ranges);
2182 buf_size = T5_REGMAP_SIZE;
2183 }
2184
2185 regs->version = mk_adap_vers(ap);
2186
2187 memset(buf, 0, buf_size);
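/* Each pair of reg_ranges[] entries describes an inclusive [first, last]
 * register address range; the loop below walks the pairs and dumps every
 * register in each range into the caller-supplied buffer.
 */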
2188 for (i = 0; i < arr_size; i += 2)
2189 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2190 }
2191
2192 static int restart_autoneg(struct net_device *dev)
2193 {
2194 struct port_info *p = netdev_priv(dev);
2195
2196 if (!netif_running(dev))
2197 return -EAGAIN;
2198 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2199 return -EINVAL;
2200 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2201 return 0;
2202 }
2203
2204 static int identify_port(struct net_device *dev,
2205 enum ethtool_phys_id_state state)
2206 {
2207 unsigned int val;
2208 struct adapter *adap = netdev2adap(dev);
2209
2210 if (state == ETHTOOL_ID_ACTIVE)
2211 val = 0xffff;
2212 else if (state == ETHTOOL_ID_INACTIVE)
2213 val = 0;
2214 else
2215 return -EINVAL;
2216
2217 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2218 }
2219
2220 static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
2221 {
2222 unsigned int v = 0;
2223
2224 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2225 type == FW_PORT_TYPE_BT_XAUI) {
2226 v |= SUPPORTED_TP;
2227 if (caps & FW_PORT_CAP_SPEED_100M)
2228 v |= SUPPORTED_100baseT_Full;
2229 if (caps & FW_PORT_CAP_SPEED_1G)
2230 v |= SUPPORTED_1000baseT_Full;
2231 if (caps & FW_PORT_CAP_SPEED_10G)
2232 v |= SUPPORTED_10000baseT_Full;
2233 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2234 v |= SUPPORTED_Backplane;
2235 if (caps & FW_PORT_CAP_SPEED_1G)
2236 v |= SUPPORTED_1000baseKX_Full;
2237 if (caps & FW_PORT_CAP_SPEED_10G)
2238 v |= SUPPORTED_10000baseKX4_Full;
2239 } else if (type == FW_PORT_TYPE_KR)
2240 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2241 else if (type == FW_PORT_TYPE_BP_AP)
2242 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2243 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2244 else if (type == FW_PORT_TYPE_BP4_AP)
2245 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2246 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2247 SUPPORTED_10000baseKX4_Full;
2248 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2249 type == FW_PORT_TYPE_FIBER_XAUI ||
2250 type == FW_PORT_TYPE_SFP ||
2251 type == FW_PORT_TYPE_QSFP_10G ||
2252 type == FW_PORT_TYPE_QSA) {
2253 v |= SUPPORTED_FIBRE;
2254 if (caps & FW_PORT_CAP_SPEED_1G)
2255 v |= SUPPORTED_1000baseT_Full;
2256 if (caps & FW_PORT_CAP_SPEED_10G)
2257 v |= SUPPORTED_10000baseT_Full;
2258 } else if (type == FW_PORT_TYPE_BP40_BA ||
2259 type == FW_PORT_TYPE_QSFP) {
2260 v |= SUPPORTED_40000baseSR4_Full;
2261 v |= SUPPORTED_FIBRE;
2262 }
2263
2264 if (caps & FW_PORT_CAP_ANEG)
2265 v |= SUPPORTED_Autoneg;
2266 return v;
2267 }
2268
2269 static unsigned int to_fw_linkcaps(unsigned int caps)
2270 {
2271 unsigned int v = 0;
2272
2273 if (caps & ADVERTISED_100baseT_Full)
2274 v |= FW_PORT_CAP_SPEED_100M;
2275 if (caps & ADVERTISED_1000baseT_Full)
2276 v |= FW_PORT_CAP_SPEED_1G;
2277 if (caps & ADVERTISED_10000baseT_Full)
2278 v |= FW_PORT_CAP_SPEED_10G;
2279 if (caps & ADVERTISED_40000baseSR4_Full)
2280 v |= FW_PORT_CAP_SPEED_40G;
2281 return v;
2282 }
2283
2284 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2285 {
2286 const struct port_info *p = netdev_priv(dev);
2287
2288 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2289 p->port_type == FW_PORT_TYPE_BT_XFI ||
2290 p->port_type == FW_PORT_TYPE_BT_XAUI)
2291 cmd->port = PORT_TP;
2292 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2293 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2294 cmd->port = PORT_FIBRE;
2295 else if (p->port_type == FW_PORT_TYPE_SFP ||
2296 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2297 p->port_type == FW_PORT_TYPE_QSA ||
2298 p->port_type == FW_PORT_TYPE_QSFP) {
2299 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2300 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2301 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2302 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2303 cmd->port = PORT_FIBRE;
2304 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2305 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2306 cmd->port = PORT_DA;
2307 else
2308 cmd->port = PORT_OTHER;
2309 } else
2310 cmd->port = PORT_OTHER;
2311
2312 if (p->mdio_addr >= 0) {
2313 cmd->phy_address = p->mdio_addr;
2314 cmd->transceiver = XCVR_EXTERNAL;
2315 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2316 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2317 } else {
2318 cmd->phy_address = 0; /* not really, but no better option */
2319 cmd->transceiver = XCVR_INTERNAL;
2320 cmd->mdio_support = 0;
2321 }
2322
2323 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2324 cmd->advertising = from_fw_linkcaps(p->port_type,
2325 p->link_cfg.advertising);
2326 ethtool_cmd_speed_set(cmd,
2327 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2328 cmd->duplex = DUPLEX_FULL;
2329 cmd->autoneg = p->link_cfg.autoneg;
2330 cmd->maxtxpkt = 0;
2331 cmd->maxrxpkt = 0;
2332 return 0;
2333 }
2334
2335 static unsigned int speed_to_caps(int speed)
2336 {
2337 if (speed == 100)
2338 return FW_PORT_CAP_SPEED_100M;
2339 if (speed == 1000)
2340 return FW_PORT_CAP_SPEED_1G;
2341 if (speed == 10000)
2342 return FW_PORT_CAP_SPEED_10G;
2343 if (speed == 40000)
2344 return FW_PORT_CAP_SPEED_40G;
2345 return 0;
2346 }
2347
2348 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2349 {
2350 unsigned int cap;
2351 struct port_info *p = netdev_priv(dev);
2352 struct link_config *lc = &p->link_cfg;
2353 u32 speed = ethtool_cmd_speed(cmd);
2354
2355 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2356 return -EINVAL;
2357
2358 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2359 /*
2360 * PHY offers a single speed. See if that's what's
2361 * being requested.
2362 */
2363 if (cmd->autoneg == AUTONEG_DISABLE &&
2364 (lc->supported & speed_to_caps(speed)))
2365 return 0;
2366 return -EINVAL;
2367 }
2368
2369 if (cmd->autoneg == AUTONEG_DISABLE) {
2370 cap = speed_to_caps(speed);
2371
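/* Forcing a fixed speed is only honoured for 100Mb/s here; requests to
 * force 1G/10G/40G are rejected below, presumably because those speeds
 * require auto-negotiation on these adapters.
 */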
2372 if (!(lc->supported & cap) ||
2373 (speed == 1000) ||
2374 (speed == 10000) ||
2375 (speed == 40000))
2376 return -EINVAL;
2377 lc->requested_speed = cap;
2378 lc->advertising = 0;
2379 } else {
2380 cap = to_fw_linkcaps(cmd->advertising);
2381 if (!(lc->supported & cap))
2382 return -EINVAL;
2383 lc->requested_speed = 0;
2384 lc->advertising = cap | FW_PORT_CAP_ANEG;
2385 }
2386 lc->autoneg = cmd->autoneg;
2387
2388 if (netif_running(dev))
2389 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2390 lc);
2391 return 0;
2392 }
2393
2394 static void get_pauseparam(struct net_device *dev,
2395 struct ethtool_pauseparam *epause)
2396 {
2397 struct port_info *p = netdev_priv(dev);
2398
2399 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2400 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2401 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2402 }
2403
2404 static int set_pauseparam(struct net_device *dev,
2405 struct ethtool_pauseparam *epause)
2406 {
2407 struct port_info *p = netdev_priv(dev);
2408 struct link_config *lc = &p->link_cfg;
2409
2410 if (epause->autoneg == AUTONEG_DISABLE)
2411 lc->requested_fc = 0;
2412 else if (lc->supported & FW_PORT_CAP_ANEG)
2413 lc->requested_fc = PAUSE_AUTONEG;
2414 else
2415 return -EINVAL;
2416
2417 if (epause->rx_pause)
2418 lc->requested_fc |= PAUSE_RX;
2419 if (epause->tx_pause)
2420 lc->requested_fc |= PAUSE_TX;
2421 if (netif_running(dev))
2422 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2423 lc);
2424 return 0;
2425 }
2426
2427 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2428 {
2429 const struct port_info *pi = netdev_priv(dev);
2430 const struct sge *s = &pi->adapter->sge;
2431
2432 e->rx_max_pending = MAX_RX_BUFFERS;
2433 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2434 e->rx_jumbo_max_pending = 0;
2435 e->tx_max_pending = MAX_TXQ_ENTRIES;
2436
2437 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2438 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2439 e->rx_jumbo_pending = 0;
2440 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2441 }
2442
2443 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2444 {
2445 int i;
2446 const struct port_info *pi = netdev_priv(dev);
2447 struct adapter *adapter = pi->adapter;
2448 struct sge *s = &adapter->sge;
2449
2450 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2451 e->tx_pending > MAX_TXQ_ENTRIES ||
2452 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2453 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2454 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2455 return -EINVAL;
2456
2457 if (adapter->flags & FULL_INIT_DONE)
2458 return -EBUSY;
2459
2460 for (i = 0; i < pi->nqsets; ++i) {
2461 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2462 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2463 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2464 }
2465 return 0;
2466 }
2467
2468 static int closest_timer(const struct sge *s, int time)
2469 {
2470 int i, delta, match = 0, min_delta = INT_MAX;
2471
2472 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2473 delta = time - s->timer_val[i];
2474 if (delta < 0)
2475 delta = -delta;
2476 if (delta < min_delta) {
2477 min_delta = delta;
2478 match = i;
2479 }
2480 }
2481 return match;
2482 }
2483
2484 static int closest_thres(const struct sge *s, int thres)
2485 {
2486 int i, delta, match = 0, min_delta = INT_MAX;
2487
2488 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2489 delta = thres - s->counter_val[i];
2490 if (delta < 0)
2491 delta = -delta;
2492 if (delta < min_delta) {
2493 min_delta = delta;
2494 match = i;
2495 }
2496 }
2497 return match;
2498 }
2499
2500 /*
2501 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2502 */
2503 unsigned int qtimer_val(const struct adapter *adap,
2504 const struct sge_rspq *q)
2505 {
2506 unsigned int idx = q->intr_params >> 1;
2507
2508 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2509 }
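/* As encoded in set_rspq_intr_params() below, bit 0 of intr_params is the
 * packet-count enable (QINTR_CNT_EN) and the remaining bits hold the
 * hold-off timer index (QINTR_TIMER_IDX()), hence the right shift by one
 * above.  For example, with illustrative timer values of
 * sge.timer_val[] = {5, 10, 20, 50, 100, 200}, an intr_params of
 * QINTR_TIMER_IDX(3) | QINTR_CNT_EN yields a 50us hold-off timer with the
 * packet counter also enabled.
 */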
2510
2511 /**
2512 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2513 * @q: the Rx queue
2514 * @us: the hold-off time in us, or 0 to disable timer
2515 * @cnt: the hold-off packet count, or 0 to disable counter
2516 *
2517 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2518 * one of the two needs to be enabled for the queue to generate interrupts.
2519 */
2520 static int set_rspq_intr_params(struct sge_rspq *q,
2521 unsigned int us, unsigned int cnt)
2522 {
2523 struct adapter *adap = q->adap;
2524
2525 if ((us | cnt) == 0)
2526 cnt = 1;
2527
2528 if (cnt) {
2529 int err;
2530 u32 v, new_idx;
2531
2532 new_idx = closest_thres(&adap->sge, cnt);
2533 if (q->desc && q->pktcnt_idx != new_idx) {
2534 /* the queue has already been created, update it */
2535 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
2536 FW_PARAMS_PARAM_X_V(
2537 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2538 FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
2539 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2540 &new_idx);
2541 if (err)
2542 return err;
2543 }
2544 q->pktcnt_idx = new_idx;
2545 }
2546
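/* A hold-off time of zero means "no timer": encode it as an index one past
 * the last valid SGE timer (the literal 6 is presumably SGE_NTIMERS) so
 * that qtimer_val() above reports it back as 0.
 */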
2547 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2548 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2549 return 0;
2550 }
2551
2552 /**
2553 * set_rx_intr_params - set a net device's RX interrupt hold-off parameters
2554 * @dev: the network device
2555 * @us: the hold-off time in us, or 0 to disable timer
2556 * @cnt: the hold-off packet count, or 0 to disable counter
2557 *
2558 * Set the RX interrupt hold-off parameters for a network device.
2559 */
2560 static int set_rx_intr_params(struct net_device *dev,
2561 unsigned int us, unsigned int cnt)
2562 {
2563 int i, err;
2564 struct port_info *pi = netdev_priv(dev);
2565 struct adapter *adap = pi->adapter;
2566 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2567
2568 for (i = 0; i < pi->nqsets; i++, q++) {
2569 err = set_rspq_intr_params(&q->rspq, us, cnt);
2570 if (err)
2571 return err;
2572 }
2573 return 0;
2574 }
2575
2576 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2577 {
2578 int i;
2579 struct port_info *pi = netdev_priv(dev);
2580 struct adapter *adap = pi->adapter;
2581 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2582
2583 for (i = 0; i < pi->nqsets; i++, q++)
2584 q->rspq.adaptive_rx = adaptive_rx;
2585
2586 return 0;
2587 }
2588
2589 static int get_adaptive_rx_setting(struct net_device *dev)
2590 {
2591 struct port_info *pi = netdev_priv(dev);
2592 struct adapter *adap = pi->adapter;
2593 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2594
2595 return q->rspq.adaptive_rx;
2596 }
2597
2598 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2599 {
2600 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
2601 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2602 c->rx_max_coalesced_frames);
2603 }
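/* set_coalesce() and get_coalesce() back the standard ethtool coalescing
 * controls.  For example, "ethtool -C ethX rx-usecs 50 rx-frames 8"
 * (interface name illustrative) asks for a 50us hold-off timer and an
 * 8-packet threshold, which set_rx_intr_params() maps onto the closest
 * supported SGE timer and counter values.
 */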
2604
2605 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2606 {
2607 const struct port_info *pi = netdev_priv(dev);
2608 const struct adapter *adap = pi->adapter;
2609 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2610
2611 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2612 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2613 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2614 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2615 return 0;
2616 }
2617
2618 /**
2619 * eeprom_ptov - translate a physical EEPROM address to virtual
2620 * @phys_addr: the physical EEPROM address
2621 * @fn: the PCI function number
2622 * @sz: size of function-specific area
2623 *
2624 * Translate a physical EEPROM address to virtual. The first 1K is
2625 * accessed through virtual addresses starting at 31K; the rest is
2626 * accessed through virtual addresses starting at 0.
2627 *
2628 * The mapping is as follows:
2629 * [0..1K) -> [31K..32K)
2630 * [1K..1K+A) -> [31K-A..31K)
2631 * [1K+A..ES) -> [0..ES-A-1K)
2632 *
2633 * where A = @fn * @sz, and ES = EEPROM size.
2634 */
2635 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2636 {
2637 fn *= sz;
2638 if (phys_addr < 1024)
2639 return phys_addr + (31 << 10);
2640 if (phys_addr < 1024 + fn)
2641 return 31744 - fn + phys_addr - 1024;
2642 if (phys_addr < EEPROMSIZE)
2643 return phys_addr - 1024 - fn;
2644 return -EINVAL;
2645 }
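/* A short worked example of the mapping above, taking fn == 1 and an
 * illustrative per-function area of sz == 1024 bytes, so A == 1024:
 *
 *	eeprom_ptov(0, 1, 1024)    -> 31744  (start of [31K..32K))
 *	eeprom_ptov(1024, 1, 1024) -> 30720  (start of [31K-A..31K))
 *	eeprom_ptov(2048, 1, 1024) -> 0      (first address past the PF area,
 *	                                      assuming 2048 < EEPROMSIZE)
 */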
2646
2647 /*
2648 * The next two routines implement eeprom read/write from physical addresses.
2649 */
2650 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2651 {
2652 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2653
2654 if (vaddr >= 0)
2655 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2656 return vaddr < 0 ? vaddr : 0;
2657 }
2658
2659 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2660 {
2661 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2662
2663 if (vaddr >= 0)
2664 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2665 return vaddr < 0 ? vaddr : 0;
2666 }
2667
2668 #define EEPROM_MAGIC 0x38E2F10C
2669
2670 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2671 u8 *data)
2672 {
2673 int i, err = 0;
2674 struct adapter *adapter = netdev2adap(dev);
2675
2676 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2677 if (!buf)
2678 return -ENOMEM;
2679
2680 e->magic = EEPROM_MAGIC;
2681 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2682 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2683
2684 if (!err)
2685 memcpy(data, buf + e->offset, e->len);
2686 kfree(buf);
2687 return err;
2688 }
2689
2690 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2691 u8 *data)
2692 {
2693 u8 *buf;
2694 int err = 0;
2695 u32 aligned_offset, aligned_len, *p;
2696 struct adapter *adapter = netdev2adap(dev);
2697
2698 if (eeprom->magic != EEPROM_MAGIC)
2699 return -EINVAL;
2700
2701 aligned_offset = eeprom->offset & ~3;
2702 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2703
2704 if (adapter->fn > 0) {
2705 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2706
2707 if (aligned_offset < start ||
2708 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2709 return -EPERM;
2710 }
2711
2712 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2713 /*
2714 * RMW possibly needed for first or last words.
2715 */
2716 buf = kmalloc(aligned_len, GFP_KERNEL);
2717 if (!buf)
2718 return -ENOMEM;
2719 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2720 if (!err && aligned_len > 4)
2721 err = eeprom_rd_phys(adapter,
2722 aligned_offset + aligned_len - 4,
2723 (u32 *)&buf[aligned_len - 4]);
2724 if (err)
2725 goto out;
2726 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2727 } else
2728 buf = data;
2729
2730 err = t4_seeprom_wp(adapter, false);
2731 if (err)
2732 goto out;
2733
2734 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2735 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2736 aligned_offset += 4;
2737 }
2738
2739 if (!err)
2740 err = t4_seeprom_wp(adapter, true);
2741 out:
2742 if (buf != data)
2743 kfree(buf);
2744 return err;
2745 }
2746
2747 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2748 {
2749 int ret;
2750 const struct firmware *fw;
2751 struct adapter *adap = netdev2adap(netdev);
2752 unsigned int mbox = PCIE_FW_MASTER_M + 1;
2753
2754 ef->data[sizeof(ef->data) - 1] = '\0';
2755 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2756 if (ret < 0)
2757 return ret;
2758
2759 /* If the adapter has been fully initialized then we'll go ahead and
2760 * try to get the firmware's cooperation in upgrading to the new
2761 * firmware image; otherwise we'll try to do the entire job from the
2762 * host ... and we always "force" the operation in this path.
2763 */
2764 if (adap->flags & FULL_INIT_DONE)
2765 mbox = adap->mbox;
2766
2767 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2768 release_firmware(fw);
2769 if (!ret)
2770 dev_info(adap->pdev_dev,
2771 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
2772 return ret;
2773 }
2774
2775 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2776 #define BCAST_CRC 0xa0ccc1a6
2777
2778 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2779 {
2780 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2781 wol->wolopts = netdev2adap(dev)->wol;
2782 memset(&wol->sopass, 0, sizeof(wol->sopass));
2783 }
2784
2785 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2786 {
2787 int err = 0;
2788 struct port_info *pi = netdev_priv(dev);
2789
2790 if (wol->wolopts & ~WOL_SUPPORTED)
2791 return -EINVAL;
2792 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2793 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2794 if (wol->wolopts & WAKE_BCAST) {
2795 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2796 ~0ULL, 0, false);
2797 if (!err)
2798 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2799 ~6ULL, ~0ULL, BCAST_CRC, true);
2800 } else
2801 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2802 return err;
2803 }
2804
2805 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2806 {
2807 const struct port_info *pi = netdev_priv(dev);
2808 netdev_features_t changed = dev->features ^ features;
2809 int err;
2810
2811 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2812 return 0;
2813
2814 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2815 -1, -1, -1,
2816 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2817 if (unlikely(err))
2818 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2819 return err;
2820 }
2821
2822 static u32 get_rss_table_size(struct net_device *dev)
2823 {
2824 const struct port_info *pi = netdev_priv(dev);
2825
2826 return pi->rss_size;
2827 }
2828
2829 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
2830 {
2831 const struct port_info *pi = netdev_priv(dev);
2832 unsigned int n = pi->rss_size;
2833
2834 if (hfunc)
2835 *hfunc = ETH_RSS_HASH_TOP;
2836 if (!p)
2837 return 0;
2838 while (n--)
2839 p[n] = pi->rss[n];
2840 return 0;
2841 }
2842
2843 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
2844 const u8 hfunc)
2845 {
2846 unsigned int i;
2847 struct port_info *pi = netdev_priv(dev);
2848
2849 /* Only the RSS indirection table can be changed here; reject any attempt
2850 * to supply an RSS key or to select a hash function other than Toeplitz.
2851 */
2852 if (key ||
2853 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
2854 return -EOPNOTSUPP;
2855 if (!p)
2856 return 0;
2857
2858 for (i = 0; i < pi->rss_size; i++)
2859 pi->rss[i] = p[i];
2860 if (pi->adapter->flags & FULL_INIT_DONE)
2861 return write_rss(pi, pi->rss);
2862 return 0;
2863 }
2864
2865 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2866 u32 *rules)
2867 {
2868 const struct port_info *pi = netdev_priv(dev);
2869
2870 switch (info->cmd) {
2871 case ETHTOOL_GRXFH: {
2872 unsigned int v = pi->rss_mode;
2873
2874 info->data = 0;
2875 switch (info->flow_type) {
2876 case TCP_V4_FLOW:
2877 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
2878 info->data = RXH_IP_SRC | RXH_IP_DST |
2879 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2880 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2881 info->data = RXH_IP_SRC | RXH_IP_DST;
2882 break;
2883 case UDP_V4_FLOW:
2884 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
2885 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
2886 info->data = RXH_IP_SRC | RXH_IP_DST |
2887 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2888 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2889 info->data = RXH_IP_SRC | RXH_IP_DST;
2890 break;
2891 case SCTP_V4_FLOW:
2892 case AH_ESP_V4_FLOW:
2893 case IPV4_FLOW:
2894 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
2895 info->data = RXH_IP_SRC | RXH_IP_DST;
2896 break;
2897 case TCP_V6_FLOW:
2898 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
2899 info->data = RXH_IP_SRC | RXH_IP_DST |
2900 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2901 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
2902 info->data = RXH_IP_SRC | RXH_IP_DST;
2903 break;
2904 case UDP_V6_FLOW:
2905 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
2906 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
2907 info->data = RXH_IP_SRC | RXH_IP_DST |
2908 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2909 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
2910 info->data = RXH_IP_SRC | RXH_IP_DST;
2911 break;
2912 case SCTP_V6_FLOW:
2913 case AH_ESP_V6_FLOW:
2914 case IPV6_FLOW:
2915 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
2916 info->data = RXH_IP_SRC | RXH_IP_DST;
2917 break;
2918 }
2919 return 0;
2920 }
2921 case ETHTOOL_GRXRINGS:
2922 info->data = pi->nqsets;
2923 return 0;
2924 }
2925 return -EOPNOTSUPP;
2926 }
2927
2928 static const struct ethtool_ops cxgb_ethtool_ops = {
2929 .get_settings = get_settings,
2930 .set_settings = set_settings,
2931 .get_drvinfo = get_drvinfo,
2932 .get_msglevel = get_msglevel,
2933 .set_msglevel = set_msglevel,
2934 .get_ringparam = get_sge_param,
2935 .set_ringparam = set_sge_param,
2936 .get_coalesce = get_coalesce,
2937 .set_coalesce = set_coalesce,
2938 .get_eeprom_len = get_eeprom_len,
2939 .get_eeprom = get_eeprom,
2940 .set_eeprom = set_eeprom,
2941 .get_pauseparam = get_pauseparam,
2942 .set_pauseparam = set_pauseparam,
2943 .get_link = ethtool_op_get_link,
2944 .get_strings = get_strings,
2945 .set_phys_id = identify_port,
2946 .nway_reset = restart_autoneg,
2947 .get_sset_count = get_sset_count,
2948 .get_ethtool_stats = get_stats,
2949 .get_regs_len = get_regs_len,
2950 .get_regs = get_regs,
2951 .get_wol = get_wol,
2952 .set_wol = set_wol,
2953 .get_rxnfc = get_rxnfc,
2954 .get_rxfh_indir_size = get_rss_table_size,
2955 .get_rxfh = get_rss_table,
2956 .set_rxfh = set_rss_table,
2957 .flash_device = set_flash,
2958 };
2959
2960 static int setup_debugfs(struct adapter *adap)
2961 {
2962 if (IS_ERR_OR_NULL(adap->debugfs_root))
2963 return -1;
2964
2965 #ifdef CONFIG_DEBUG_FS
2966 t4_setup_debugfs(adap);
2967 #endif
2968 return 0;
2969 }
2970
2971 /*
2972 * upper-layer driver support
2973 */
2974
2975 /*
2976 * Allocate an active-open TID and set it to the supplied value.
2977 */
2978 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2979 {
2980 int atid = -1;
2981
2982 spin_lock_bh(&t->atid_lock);
2983 if (t->afree) {
2984 union aopen_entry *p = t->afree;
2985
2986 atid = (p - t->atid_tab) + t->atid_base;
2987 t->afree = p->next;
2988 p->data = data;
2989 t->atids_in_use++;
2990 }
2991 spin_unlock_bh(&t->atid_lock);
2992 return atid;
2993 }
2994 EXPORT_SYMBOL(cxgb4_alloc_atid);
2995
2996 /*
2997 * Release an active-open TID.
2998 */
2999 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3000 {
3001 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3002
3003 spin_lock_bh(&t->atid_lock);
3004 p->next = t->afree;
3005 t->afree = p;
3006 t->atids_in_use--;
3007 spin_unlock_bh(&t->atid_lock);
3008 }
3009 EXPORT_SYMBOL(cxgb4_free_atid);
3010
3011 /*
3012 * Allocate a server TID and set it to the supplied value.
3013 */
3014 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3015 {
3016 int stid;
3017
3018 spin_lock_bh(&t->stid_lock);
3019 if (family == PF_INET) {
3020 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3021 if (stid < t->nstids)
3022 __set_bit(stid, t->stid_bmap);
3023 else
3024 stid = -1;
3025 } else {
3026 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3027 if (stid < 0)
3028 stid = -1;
3029 }
3030 if (stid >= 0) {
3031 t->stid_tab[stid].data = data;
3032 stid += t->stid_base;
3033 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3034 * This is equivalent to 4 TIDs. With CLIP enabled it
3035 * needs 2 TIDs.
3036 */
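/* Note that the bitmap allocation above always reserves an order-2 region
 * (four consecutive stids) for the non-IPv4 case, which is why the
 * accounting below charges four entries per IPv6 server.
 */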
3037 if (family == PF_INET)
3038 t->stids_in_use++;
3039 else
3040 t->stids_in_use += 4;
3041 }
3042 spin_unlock_bh(&t->stid_lock);
3043 return stid;
3044 }
3045 EXPORT_SYMBOL(cxgb4_alloc_stid);
3046
3047 /* Allocate a server filter TID and set it to the supplied value.
3048 */
3049 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3050 {
3051 int stid;
3052
3053 spin_lock_bh(&t->stid_lock);
3054 if (family == PF_INET) {
3055 stid = find_next_zero_bit(t->stid_bmap,
3056 t->nstids + t->nsftids, t->nstids);
3057 if (stid < (t->nstids + t->nsftids))
3058 __set_bit(stid, t->stid_bmap);
3059 else
3060 stid = -1;
3061 } else {
3062 stid = -1;
3063 }
3064 if (stid >= 0) {
3065 t->stid_tab[stid].data = data;
3066 stid -= t->nstids;
3067 stid += t->sftid_base;
3068 t->stids_in_use++;
3069 }
3070 spin_unlock_bh(&t->stid_lock);
3071 return stid;
3072 }
3073 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3074
3075 /* Release a server TID.
3076 */
3077 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3078 {
3079 /* Is it a server filter TID? */
3080 if (t->nsftids && (stid >= t->sftid_base)) {
3081 stid -= t->sftid_base;
3082 stid += t->nstids;
3083 } else {
3084 stid -= t->stid_base;
3085 }
3086
3087 spin_lock_bh(&t->stid_lock);
3088 if (family == PF_INET)
3089 __clear_bit(stid, t->stid_bmap);
3090 else
3091 bitmap_release_region(t->stid_bmap, stid, 2);
3092 t->stid_tab[stid].data = NULL;
3093 if (family == PF_INET)
3094 t->stids_in_use--;
3095 else
3096 t->stids_in_use -= 4;
3097 spin_unlock_bh(&t->stid_lock);
3098 }
3099 EXPORT_SYMBOL(cxgb4_free_stid);
3100
3101 /*
3102 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3103 */
3104 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3105 unsigned int tid)
3106 {
3107 struct cpl_tid_release *req;
3108
3109 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3110 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3111 INIT_TP_WR(req, tid);
3112 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3113 }
3114
3115 /*
3116 * Queue a TID release request and if necessary schedule a work queue to
3117 * process it.
3118 */
3119 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3120 unsigned int tid)
3121 {
3122 void **p = &t->tid_tab[tid];
3123 struct adapter *adap = container_of(t, struct adapter, tids);
3124
3125 spin_lock_bh(&adap->tid_release_lock);
3126 *p = adap->tid_release_head;
3127 /* Low 2 bits encode the Tx channel number */
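/* This works because the tid_tab[] slots are pointer-aligned, so the low
 * two bits of p are known to be zero.  For example (addresses illustrative),
 * tagging p == 0x...5f0 with chan == 2 stores 0x...5f2;
 * process_tid_release_list() recovers chan with "& 3" and the original
 * pointer by subtracting chan back off.
 */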
3128 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3129 if (!adap->tid_release_task_busy) {
3130 adap->tid_release_task_busy = true;
3131 queue_work(adap->workq, &adap->tid_release_task);
3132 }
3133 spin_unlock_bh(&adap->tid_release_lock);
3134 }
3135
3136 /*
3137 * Process the list of pending TID release requests.
3138 */
3139 static void process_tid_release_list(struct work_struct *work)
3140 {
3141 struct sk_buff *skb;
3142 struct adapter *adap;
3143
3144 adap = container_of(work, struct adapter, tid_release_task);
3145
3146 spin_lock_bh(&adap->tid_release_lock);
3147 while (adap->tid_release_head) {
3148 void **p = adap->tid_release_head;
3149 unsigned int chan = (uintptr_t)p & 3;
3150 p = (void *)p - chan;
3151
3152 adap->tid_release_head = *p;
3153 *p = NULL;
3154 spin_unlock_bh(&adap->tid_release_lock);
3155
3156 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3157 GFP_KERNEL)))
3158 schedule_timeout_uninterruptible(1);
3159
3160 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3161 t4_ofld_send(adap, skb);
3162 spin_lock_bh(&adap->tid_release_lock);
3163 }
3164 adap->tid_release_task_busy = false;
3165 spin_unlock_bh(&adap->tid_release_lock);
3166 }
3167
3168 /*
3169 * Release a TID and inform HW. If we are unable to allocate the release
3170 * message we defer to a work queue.
3171 */
3172 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3173 {
3174 void *old;
3175 struct sk_buff *skb;
3176 struct adapter *adap = container_of(t, struct adapter, tids);
3177
3178 old = t->tid_tab[tid];
3179 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3180 if (likely(skb)) {
3181 t->tid_tab[tid] = NULL;
3182 mk_tid_release(skb, chan, tid);
3183 t4_ofld_send(adap, skb);
3184 } else
3185 cxgb4_queue_tid_release(t, chan, tid);
3186 if (old)
3187 atomic_dec(&t->tids_in_use);
3188 }
3189 EXPORT_SYMBOL(cxgb4_remove_tid);
3190
3191 /*
3192 * Allocate and initialize the TID tables. Returns 0 on success.
3193 */
3194 static int tid_init(struct tid_info *t)
3195 {
3196 size_t size;
3197 unsigned int stid_bmap_size;
3198 unsigned int natids = t->natids;
3199 struct adapter *adap = container_of(t, struct adapter, tids);
3200
3201 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3202 size = t->ntids * sizeof(*t->tid_tab) +
3203 natids * sizeof(*t->atid_tab) +
3204 t->nstids * sizeof(*t->stid_tab) +
3205 t->nsftids * sizeof(*t->stid_tab) +
3206 stid_bmap_size * sizeof(long) +
3207 t->nftids * sizeof(*t->ftid_tab) +
3208 t->nsftids * sizeof(*t->ftid_tab);
3209
3210 t->tid_tab = t4_alloc_mem(size);
3211 if (!t->tid_tab)
3212 return -ENOMEM;
3213
3214 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3215 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3216 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3217 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3218 spin_lock_init(&t->stid_lock);
3219 spin_lock_init(&t->atid_lock);
3220
3221 t->stids_in_use = 0;
3222 t->afree = NULL;
3223 t->atids_in_use = 0;
3224 atomic_set(&t->tids_in_use, 0);
3225
3226 /* Setup the free list for atid_tab and clear the stid bitmap. */
3227 if (natids) {
3228 while (--natids)
3229 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3230 t->afree = t->atid_tab;
3231 }
3232 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3233 /* Reserve stid 0 for T4/T5 adapters */
3234 if (!t->stid_base &&
3235 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3236 __set_bit(0, t->stid_bmap);
3237
3238 return 0;
3239 }
3240
3241 /**
3242 * cxgb4_create_server - create an IP server
3243 * @dev: the device
3244 * @stid: the server TID
3245 * @sip: local IP address to bind server to
3246 * @sport: the server's TCP port
3247 * @queue: queue to direct messages from this server to
3248 *
3249 * Create an IP server for the given port and address.
3250 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3251 */
3252 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3253 __be32 sip, __be16 sport, __be16 vlan,
3254 unsigned int queue)
3255 {
3256 unsigned int chan;
3257 struct sk_buff *skb;
3258 struct adapter *adap;
3259 struct cpl_pass_open_req *req;
3260 int ret;
3261
3262 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3263 if (!skb)
3264 return -ENOMEM;
3265
3266 adap = netdev2adap(dev);
3267 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3268 INIT_TP_WR(req, 0);
3269 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3270 req->local_port = sport;
3271 req->peer_port = htons(0);
3272 req->local_ip = sip;
3273 req->peer_ip = htonl(0);
3274 chan = rxq_to_chan(&adap->sge, queue);
3275 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3276 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
3277 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
3278 ret = t4_mgmt_tx(adap, skb);
3279 return net_xmit_eval(ret);
3280 }
3281 EXPORT_SYMBOL(cxgb4_create_server);
3282
3283 /**
 * cxgb4_create_server6 - create an IPv6 server
3284 * @dev: the device
3285 * @stid: the server TID
3286 * @sip: local IPv6 address to bind server to
3287 * @sport: the server's TCP port
3288 * @queue: queue to direct messages from this server to
3289 *
3290 * Create an IPv6 server for the given port and address.
3291 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3292 */
3293 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3294 const struct in6_addr *sip, __be16 sport,
3295 unsigned int queue)
3296 {
3297 unsigned int chan;
3298 struct sk_buff *skb;
3299 struct adapter *adap;
3300 struct cpl_pass_open_req6 *req;
3301 int ret;
3302
3303 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3304 if (!skb)
3305 return -ENOMEM;
3306
3307 adap = netdev2adap(dev);
3308 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3309 INIT_TP_WR(req, 0);
3310 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3311 req->local_port = sport;
3312 req->peer_port = htons(0);
3313 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3314 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3315 req->peer_ip_hi = cpu_to_be64(0);
3316 req->peer_ip_lo = cpu_to_be64(0);
3317 chan = rxq_to_chan(&adap->sge, queue);
3318 req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
3319 req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
3320 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
3321 ret = t4_mgmt_tx(adap, skb);
3322 return net_xmit_eval(ret);
3323 }
3324 EXPORT_SYMBOL(cxgb4_create_server6);
3325
3326 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3327 unsigned int queue, bool ipv6)
3328 {
3329 struct sk_buff *skb;
3330 struct adapter *adap;
3331 struct cpl_close_listsvr_req *req;
3332 int ret;
3333
3334 adap = netdev2adap(dev);
3335
3336 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3337 if (!skb)
3338 return -ENOMEM;
3339
3340 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3341 INIT_TP_WR(req, 0);
3342 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3343 req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
3344 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
3345 ret = t4_mgmt_tx(adap, skb);
3346 return net_xmit_eval(ret);
3347 }
3348 EXPORT_SYMBOL(cxgb4_remove_server);
3349
3350 /**
3351 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3352 * @mtus: the HW MTU table
3353 * @mtu: the target MTU
3354 * @idx: index of selected entry in the MTU table
3355 *
3356 * Returns the index and the value in the HW MTU table that is closest to
3357 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3358 * table, in which case that smallest available value is selected.
3359 */
3360 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3361 unsigned int *idx)
3362 {
3363 unsigned int i = 0;
3364
3365 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3366 ++i;
3367 if (idx)
3368 *idx = i;
3369 return mtus[i];
3370 }
3371 EXPORT_SYMBOL(cxgb4_best_mtu);
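/* Example of the selection rule above, using an illustrative table fragment
 * {..., 1280, 1488, 1500, 2002, ...}: cxgb4_best_mtu(mtus, 1492, &idx) stops
 * at 1488 (the largest entry not exceeding 1492) and returns it with *idx set
 * to that entry's position; a target below the smallest table entry simply
 * returns entry 0.
 */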
3372
3373 /**
3374 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3375 * @mtus: the HW MTU table
3376 * @header_size: Header Size
3377 * @data_size_max: maximum Data Segment Size
3378 * @data_size_align: desired Data Segment Size Alignment (2^N)
3379 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3380 *
3381 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3382 * MTU Table based solely on a Maximum MTU parameter, we break that
3383 * parameter up into a Header Size and Maximum Data Segment Size, and
3384 * provide a desired Data Segment Size Alignment. If we find an MTU in
3385 * the Hardware MTU Table which will result in a Data Segment Size with
3386 * the requested alignment _and_ that MTU isn't "too far" from the
3387 * closest MTU, then we'll return that rather than the closest MTU.
3388 */
3389 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3390 unsigned short header_size,
3391 unsigned short data_size_max,
3392 unsigned short data_size_align,
3393 unsigned int *mtu_idxp)
3394 {
3395 unsigned short max_mtu = header_size + data_size_max;
3396 unsigned short data_size_align_mask = data_size_align - 1;
3397 int mtu_idx, aligned_mtu_idx;
3398
3399 /* Scan the MTU Table till we find an MTU which is larger than our
3400 * Maximum MTU or we reach the end of the table. Along the way,
3401 * record the last MTU found, if any, which will result in a Data
3402 * Segment Length matching the requested alignment.
3403 */
3404 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3405 unsigned short data_size = mtus[mtu_idx] - header_size;
3406
3407 /* If this MTU minus the Header Size would result in a
3408 * Data Segment Size of the desired alignment, remember it.
3409 */
3410 if ((data_size & data_size_align_mask) == 0)
3411 aligned_mtu_idx = mtu_idx;
3412
3413 /* If we're not at the end of the Hardware MTU Table and the
3414 * next element is larger than our Maximum MTU, drop out of
3415 * the loop.
3416 */
3417 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3418 break;
3419 }
3420
3421 /* If we fell out of the loop because we ran to the end of the table,
3422 * then we just have to use the last [largest] entry.
3423 */
3424 if (mtu_idx == NMTUS)
3425 mtu_idx--;
3426
3427 /* If we found an MTU which resulted in the requested Data Segment
3428 * Length alignment and that's "not far" from the largest MTU which is
3429 * less than or equal to the maximum MTU, then use that.
3430 */
3431 if (aligned_mtu_idx >= 0 &&
3432 mtu_idx - aligned_mtu_idx <= 1)
3433 mtu_idx = aligned_mtu_idx;
3434
3435 /* If the caller has passed in an MTU Index pointer, pass the
3436 * MTU Index back. Return the MTU value.
3437 */
3438 if (mtu_idxp)
3439 *mtu_idxp = mtu_idx;
3440 return mtus[mtu_idx];
3441 }
3442 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
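/* The same search in numbers (all values illustrative): with
 * header_size == 40, data_size_max == 8192 and data_size_align == 512, the
 * scan remembers the last table entry whose payload (mtu - 40) is a multiple
 * of 512; if that entry is within one table slot of the plain closest-MTU
 * answer, it is returned instead, otherwise the closest MTU wins.
 */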
3443
3444 /**
3445 * cxgb4_port_chan - get the HW channel of a port
3446 * @dev: the net device for the port
3447 *
3448 * Return the HW Tx channel of the given port.
3449 */
3450 unsigned int cxgb4_port_chan(const struct net_device *dev)
3451 {
3452 return netdev2pinfo(dev)->tx_chan;
3453 }
3454 EXPORT_SYMBOL(cxgb4_port_chan);
3455
3456 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3457 {
3458 struct adapter *adap = netdev2adap(dev);
3459 u32 v1, v2, lp_count, hp_count;
3460
3461 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3462 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
3463 if (is_t4(adap->params.chip)) {
3464 lp_count = LP_COUNT_G(v1);
3465 hp_count = HP_COUNT_G(v1);
3466 } else {
3467 lp_count = LP_COUNT_T5_G(v1);
3468 hp_count = HP_COUNT_T5_G(v2);
3469 }
3470 return lpfifo ? lp_count : hp_count;
3471 }
3472 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3473
3474 /**
3475 * cxgb4_port_viid - get the VI id of a port
3476 * @dev: the net device for the port
3477 *
3478 * Return the VI id of the given port.
3479 */
3480 unsigned int cxgb4_port_viid(const struct net_device *dev)
3481 {
3482 return netdev2pinfo(dev)->viid;
3483 }
3484 EXPORT_SYMBOL(cxgb4_port_viid);
3485
3486 /**
3487 * cxgb4_port_idx - get the index of a port
3488 * @dev: the net device for the port
3489 *
3490 * Return the index of the given port.
3491 */
3492 unsigned int cxgb4_port_idx(const struct net_device *dev)
3493 {
3494 return netdev2pinfo(dev)->port_id;
3495 }
3496 EXPORT_SYMBOL(cxgb4_port_idx);
3497
3498 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3499 struct tp_tcp_stats *v6)
3500 {
3501 struct adapter *adap = pci_get_drvdata(pdev);
3502
3503 spin_lock(&adap->stats_lock);
3504 t4_tp_get_tcp_stats(adap, v4, v6);
3505 spin_unlock(&adap->stats_lock);
3506 }
3507 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3508
3509 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3510 const unsigned int *pgsz_order)
3511 {
3512 struct adapter *adap = netdev2adap(dev);
3513
3514 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
3515 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
3516 HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
3517 HPZ3_V(pgsz_order[3]));
3518 }
3519 EXPORT_SYMBOL(cxgb4_iscsi_init);
3520
3521 int cxgb4_flush_eq_cache(struct net_device *dev)
3522 {
3523 struct adapter *adap = netdev2adap(dev);
3524 int ret;
3525
3526 ret = t4_fwaddrspace_write(adap, adap->mbox,
3527 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
3528 return ret;
3529 }
3530 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3531
3532 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3533 {
3534 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
3535 __be64 indices;
3536 int ret;
3537
3538 spin_lock(&adap->win0_lock);
3539 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3540 sizeof(indices), (__be32 *)&indices,
3541 T4_MEMORY_READ);
3542 spin_unlock(&adap->win0_lock);
3543 if (!ret) {
3544 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3545 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3546 }
3547 return ret;
3548 }
3549
3550 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3551 u16 size)
3552 {
3553 struct adapter *adap = netdev2adap(dev);
3554 u16 hw_pidx, hw_cidx;
3555 int ret;
3556
3557 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3558 if (ret)
3559 goto out;
3560
3561 if (pidx != hw_pidx) {
3562 u16 delta;
3563 u32 val;
3564
3565 if (pidx >= hw_pidx)
3566 delta = pidx - hw_pidx;
3567 else
3568 delta = size - hw_pidx + pidx;
3569
3570 if (is_t4(adap->params.chip))
3571 val = PIDX_V(delta);
3572 else
3573 val = PIDX_T5_V(delta);
3574 wmb();
3575 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3576 QID_V(qid) | val);
3577 }
3578 out:
3579 return ret;
3580 }
3581 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
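/* The delta computation above handles producer-index wrap.  For example
 * (sizes illustrative), with a 1024-entry queue, hw_pidx == 1000 and a
 * software pidx of 10, the ring has wrapped and the doorbell must advance
 * the hardware pointer by 1024 - 1000 + 10 == 34 entries.
 */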
3582
3583 void cxgb4_disable_db_coalescing(struct net_device *dev)
3584 {
3585 struct adapter *adap;
3586
3587 adap = netdev2adap(dev);
3588 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
3589 NOCOALESCE_F);
3590 }
3591 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3592
3593 void cxgb4_enable_db_coalescing(struct net_device *dev)
3594 {
3595 struct adapter *adap;
3596
3597 adap = netdev2adap(dev);
3598 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
3599 }
3600 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3601
3602 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3603 {
3604 struct adapter *adap;
3605 u32 offset, memtype, memaddr;
3606 u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
3607 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3608 int ret;
3609
3610 adap = netdev2adap(dev);
3611
3612 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3613
3614 /* Figure out where the offset lands in the Memory Type/Address scheme.
3615 * This code assumes that the memory is laid out starting at offset 0
3616 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3617 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3618 * MC0, and some have both MC0 and MC1.
3619 */
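/* For example (sizes illustrative): with 256MB of EDC0 and 256MB of EDC1,
 * an offset of 300MB is past edc0_end (256MB) but below edc1_end (512MB),
 * so the TPTE is read from MEM_EDC1 at memaddr == 300MB - 256MB == 44MB.
 */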
3620 size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
3621 edc0_size = EDRAM0_SIZE_G(size) << 20;
3622 size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
3623 edc1_size = EDRAM1_SIZE_G(size) << 20;
3624 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
3625 mc0_size = EXT_MEM0_SIZE_G(size) << 20;
3626
3627 edc0_end = edc0_size;
3628 edc1_end = edc0_end + edc1_size;
3629 mc0_end = edc1_end + mc0_size;
3630
3631 if (offset < edc0_end) {
3632 memtype = MEM_EDC0;
3633 memaddr = offset;
3634 } else if (offset < edc1_end) {
3635 memtype = MEM_EDC1;
3636 memaddr = offset - edc0_end;
3637 } else {
3638 if (offset < mc0_end) {
3639 memtype = MEM_MC0;
3640 memaddr = offset - edc1_end;
3641 } else if (is_t4(adap->params.chip)) {
3642 /* T4 only has a single memory channel */
3643 goto err;
3644 } else {
3645 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
3646 mc1_size = EXT_MEM1_SIZE_G(size) << 20;
3647 mc1_end = mc0_end + mc1_size;
3648 if (offset < mc1_end) {
3649 memtype = MEM_MC1;
3650 memaddr = offset - mc0_end;
3651 } else {
3652 /* offset beyond the end of any memory */
3653 goto err;
3654 }
3655 }
3656 }
3657
3658 spin_lock(&adap->win0_lock);
3659 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3660 spin_unlock(&adap->win0_lock);
3661 return ret;
3662
3663 err:
3664 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3665 stag, offset);
3666 return -EINVAL;
3667 }
3668 EXPORT_SYMBOL(cxgb4_read_tpte);
3669
3670 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3671 {
3672 u32 hi, lo;
3673 struct adapter *adap;
3674
3675 adap = netdev2adap(dev);
3676 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
3677 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
3678
3679 return ((u64)hi << 32) | (u64)lo;
3680 }
3681 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3682
3683 int cxgb4_bar2_sge_qregs(struct net_device *dev,
3684 unsigned int qid,
3685 enum cxgb4_bar2_qtype qtype,
3686 u64 *pbar2_qoffset,
3687 unsigned int *pbar2_qid)
3688 {
3689 return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
3690 qid,
3691 (qtype == CXGB4_BAR2_QTYPE_EGRESS
3692 ? T4_BAR2_QTYPE_EGRESS
3693 : T4_BAR2_QTYPE_INGRESS),
3694 pbar2_qoffset,
3695 pbar2_qid);
3696 }
3697 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
3698
3699 static struct pci_driver cxgb4_driver;
3700
3701 static void check_neigh_update(struct neighbour *neigh)
3702 {
3703 const struct device *parent;
3704 const struct net_device *netdev = neigh->dev;
3705
3706 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3707 netdev = vlan_dev_real_dev(netdev);
3708 parent = netdev->dev.parent;
3709 if (parent && parent->driver == &cxgb4_driver.driver)
3710 t4_l2t_update(dev_get_drvdata(parent), neigh);
3711 }
3712
3713 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3714 void *data)
3715 {
3716 switch (event) {
3717 case NETEVENT_NEIGH_UPDATE:
3718 check_neigh_update(data);
3719 break;
3720 case NETEVENT_REDIRECT:
3721 default:
3722 break;
3723 }
3724 return 0;
3725 }
3726
3727 static bool netevent_registered;
3728 static struct notifier_block cxgb4_netevent_nb = {
3729 .notifier_call = netevent_cb
3730 };
3731
3732 static void drain_db_fifo(struct adapter *adap, int usecs)
3733 {
3734 u32 v1, v2, lp_count, hp_count;
3735
3736 do {
3737 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
3738 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
3739 if (is_t4(adap->params.chip)) {
3740 lp_count = LP_COUNT_G(v1);
3741 hp_count = HP_COUNT_G(v1);
3742 } else {
3743 lp_count = LP_COUNT_T5_G(v1);
3744 hp_count = HP_COUNT_T5_G(v2);
3745 }
3746
3747 if (lp_count == 0 && hp_count == 0)
3748 break;
3749 set_current_state(TASK_UNINTERRUPTIBLE);
3750 schedule_timeout(usecs_to_jiffies(usecs));
3751 } while (1);
3752 }
3753
3754 static void disable_txq_db(struct sge_txq *q)
3755 {
3756 unsigned long flags;
3757
3758 spin_lock_irqsave(&q->db_lock, flags);
3759 q->db_disabled = 1;
3760 spin_unlock_irqrestore(&q->db_lock, flags);
3761 }
3762
3763 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3764 {
3765 spin_lock_irq(&q->db_lock);
3766 if (q->db_pidx_inc) {
3767 /* Make sure that all writes to the TX descriptors
3768 * are committed before we tell HW about them.
3769 */
3770 wmb();
3771 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3772 QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
3773 q->db_pidx_inc = 0;
3774 }
3775 q->db_disabled = 0;
3776 spin_unlock_irq(&q->db_lock);
3777 }
3778
3779 static void disable_dbs(struct adapter *adap)
3780 {
3781 int i;
3782
3783 for_each_ethrxq(&adap->sge, i)
3784 disable_txq_db(&adap->sge.ethtxq[i].q);
3785 for_each_ofldrxq(&adap->sge, i)
3786 disable_txq_db(&adap->sge.ofldtxq[i].q);
3787 for_each_port(adap, i)
3788 disable_txq_db(&adap->sge.ctrlq[i].q);
3789 }
3790
3791 static void enable_dbs(struct adapter *adap)
3792 {
3793 int i;
3794
3795 for_each_ethrxq(&adap->sge, i)
3796 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3797 for_each_ofldrxq(&adap->sge, i)
3798 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3799 for_each_port(adap, i)
3800 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3801 }
3802
3803 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3804 {
3805 if (adap->uld_handle[CXGB4_ULD_RDMA])
3806 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3807 cmd);
3808 }
3809
3810 static void process_db_full(struct work_struct *work)
3811 {
3812 struct adapter *adap;
3813
3814 adap = container_of(work, struct adapter, db_full_task);
3815
3816 drain_db_fifo(adap, dbfifo_drain_delay);
3817 enable_dbs(adap);
3818 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3819 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
3820 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
3821 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
3822 }
3823
3824 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3825 {
3826 u16 hw_pidx, hw_cidx;
3827 int ret;
3828
3829 spin_lock_irq(&q->db_lock);
3830 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3831 if (ret)
3832 goto out;
3833 if (q->db_pidx != hw_pidx) {
3834 u16 delta;
3835 u32 val;
3836
3837 if (q->db_pidx >= hw_pidx)
3838 delta = q->db_pidx - hw_pidx;
3839 else
3840 delta = q->size - hw_pidx + q->db_pidx;
3841
3842 if (is_t4(adap->params.chip))
3843 val = PIDX_V(delta);
3844 else
3845 val = PIDX_T5_V(delta);
3846 wmb();
3847 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
3848 QID_V(q->cntxt_id) | val);
3849 }
3850 out:
3851 q->db_disabled = 0;
3852 q->db_pidx_inc = 0;
3853 spin_unlock_irq(&q->db_lock);
3854 if (ret)
3855 CH_WARN(adap, "DB drop recovery failed.\n");
3856 }
3857 static void recover_all_queues(struct adapter *adap)
3858 {
3859 int i;
3860
3861 for_each_ethrxq(&adap->sge, i)
3862 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3863 for_each_ofldrxq(&adap->sge, i)
3864 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3865 for_each_port(adap, i)
3866 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3867 }
3868
3869 static void process_db_drop(struct work_struct *work)
3870 {
3871 struct adapter *adap;
3872
3873 adap = container_of(work, struct adapter, db_drop_task);
3874
3875 if (is_t4(adap->params.chip)) {
3876 drain_db_fifo(adap, dbfifo_drain_delay);
3877 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3878 drain_db_fifo(adap, dbfifo_drain_delay);
3879 recover_all_queues(adap);
3880 drain_db_fifo(adap, dbfifo_drain_delay);
3881 enable_dbs(adap);
3882 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3883 } else {
3884 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3885 u16 qid = (dropped_db >> 15) & 0x1ffff;
3886 u16 pidx_inc = dropped_db & 0x1fff;
3887 u64 bar2_qoffset;
3888 unsigned int bar2_qid;
3889 int ret;
3890
3891 ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
3892 &bar2_qoffset, &bar2_qid);
3893 if (ret)
3894 dev_err(adap->pdev_dev, "doorbell drop recovery: "
3895 "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
3896 else
3897 writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
3898 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
3899
3900 /* Re-enable BAR2 WC */
3901 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3902 }
3903
3904 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
3905 }
3906
3907 void t4_db_full(struct adapter *adap)
3908 {
3909 if (is_t4(adap->params.chip)) {
3910 disable_dbs(adap);
3911 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3912 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
3913 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
3914 queue_work(adap->workq, &adap->db_full_task);
3915 }
3916 }
3917
3918 void t4_db_dropped(struct adapter *adap)
3919 {
3920 if (is_t4(adap->params.chip)) {
3921 disable_dbs(adap);
3922 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3923 }
3924 queue_work(adap->workq, &adap->db_drop_task);
3925 }
3926
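/* Fill in the lower-layer driver info structure for an Upper-Layer Driver
 * (RDMA, iSCSI) and hand it to the ULD's add() callback.
 */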
3927 static void uld_attach(struct adapter *adap, unsigned int uld)
3928 {
3929 void *handle;
3930 struct cxgb4_lld_info lli;
3931 unsigned short i;
3932
3933 lli.pdev = adap->pdev;
3934 lli.pf = adap->fn;
3935 lli.l2t = adap->l2t;
3936 lli.tids = &adap->tids;
3937 lli.ports = adap->port;
3938 lli.vr = &adap->vres;
3939 lli.mtus = adap->params.mtus;
3940 if (uld == CXGB4_ULD_RDMA) {
3941 lli.rxq_ids = adap->sge.rdma_rxq;
3942 lli.ciq_ids = adap->sge.rdma_ciq;
3943 lli.nrxq = adap->sge.rdmaqs;
3944 lli.nciq = adap->sge.rdmaciqs;
3945 } else if (uld == CXGB4_ULD_ISCSI) {
3946 lli.rxq_ids = adap->sge.ofld_rxq;
3947 lli.nrxq = adap->sge.ofldqsets;
3948 }
3949 lli.ntxq = adap->sge.ofldqsets;
3950 lli.nchan = adap->params.nports;
3951 lli.nports = adap->params.nports;
3952 lli.wr_cred = adap->params.ofldq_wr_cred;
3953 lli.adapter_type = adap->params.chip;
3954 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
3955 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
3956 lli.udb_density = 1 << adap->params.sge.eq_qpp;
3957 lli.ucq_density = 1 << adap->params.sge.iq_qpp;
3958 lli.filt_mode = adap->params.tp.vlan_pri_map;
3959 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3960 for (i = 0; i < NCHAN; i++)
3961 lli.tx_modq[i] = i;
3962 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
3963 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
3964 lli.fw_vers = adap->params.fw_vers;
3965 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3966 lli.sge_ingpadboundary = adap->sge.fl_align;
3967 lli.sge_egrstatuspagesize = adap->sge.stat_len;
3968 lli.sge_pktshift = adap->sge.pktshift;
3969 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3970 lli.max_ordird_qp = adap->params.max_ordird_qp;
3971 lli.max_ird_adapter = adap->params.max_ird_adapter;
3972 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
3973
3974 handle = ulds[uld].add(&lli);
3975 if (IS_ERR(handle)) {
3976 dev_warn(adap->pdev_dev,
3977 "could not attach to the %s driver, error %ld\n",
3978 uld_str[uld], PTR_ERR(handle));
3979 return;
3980 }
3981
3982 adap->uld_handle[uld] = handle;
3983
3984 if (!netevent_registered) {
3985 register_netevent_notifier(&cxgb4_netevent_nb);
3986 netevent_registered = true;
3987 }
3988
3989 if (adap->flags & FULL_INIT_DONE)
3990 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3991 }
3992
3993 static void attach_ulds(struct adapter *adap)
3994 {
3995 unsigned int i;
3996
3997 spin_lock(&adap_rcu_lock);
3998 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3999 spin_unlock(&adap_rcu_lock);
4000
4001 mutex_lock(&uld_mutex);
4002 list_add_tail(&adap->list_node, &adapter_list);
4003 for (i = 0; i < CXGB4_ULD_MAX; i++)
4004 if (ulds[i].add)
4005 uld_attach(adap, i);
4006 mutex_unlock(&uld_mutex);
4007 }
4008
4009 static void detach_ulds(struct adapter *adap)
4010 {
4011 unsigned int i;
4012
4013 mutex_lock(&uld_mutex);
4014 list_del(&adap->list_node);
4015 for (i = 0; i < CXGB4_ULD_MAX; i++)
4016 if (adap->uld_handle[i]) {
4017 ulds[i].state_change(adap->uld_handle[i],
4018 CXGB4_STATE_DETACH);
4019 adap->uld_handle[i] = NULL;
4020 }
4021 if (netevent_registered && list_empty(&adapter_list)) {
4022 unregister_netevent_notifier(&cxgb4_netevent_nb);
4023 netevent_registered = false;
4024 }
4025 mutex_unlock(&uld_mutex);
4026
4027 spin_lock(&adap_rcu_lock);
4028 list_del_rcu(&adap->rcu_node);
4029 spin_unlock(&adap_rcu_lock);
4030 }
4031
4032 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4033 {
4034 unsigned int i;
4035
4036 mutex_lock(&uld_mutex);
4037 for (i = 0; i < CXGB4_ULD_MAX; i++)
4038 if (adap->uld_handle[i])
4039 ulds[i].state_change(adap->uld_handle[i], new_state);
4040 mutex_unlock(&uld_mutex);
4041 }
4042
4043 /**
4044 * cxgb4_register_uld - register an upper-layer driver
4045 * @type: the ULD type
4046 * @p: the ULD methods
4047 *
4048 * Registers an upper-layer driver with this driver and notifies the ULD
4049 * about any presently available devices that support its type. Returns
4050 * %-EBUSY if a ULD of the same type is already registered.
4051 */
4052 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4053 {
4054 int ret = 0;
4055 struct adapter *adap;
4056
4057 if (type >= CXGB4_ULD_MAX)
4058 return -EINVAL;
4059 mutex_lock(&uld_mutex);
4060 if (ulds[type].add) {
4061 ret = -EBUSY;
4062 goto out;
4063 }
4064 ulds[type] = *p;
4065 list_for_each_entry(adap, &adapter_list, list_node)
4066 uld_attach(adap, type);
4067 out: mutex_unlock(&uld_mutex);
4068 return ret;
4069 }
4070 EXPORT_SYMBOL(cxgb4_register_uld);
4071
4072 /**
4073 * cxgb4_unregister_uld - unregister an upper-layer driver
4074 * @type: the ULD type
4075 *
4076 * Unregisters an existing upper-layer driver.
4077 */
4078 int cxgb4_unregister_uld(enum cxgb4_uld type)
4079 {
4080 struct adapter *adap;
4081
4082 if (type >= CXGB4_ULD_MAX)
4083 return -EINVAL;
4084 mutex_lock(&uld_mutex);
4085 list_for_each_entry(adap, &adapter_list, list_node)
4086 adap->uld_handle[type] = NULL;
4087 ulds[type].add = NULL;
4088 mutex_unlock(&uld_mutex);
4089 return 0;
4090 }
4091 EXPORT_SYMBOL(cxgb4_unregister_uld);
4092
4093 #if IS_ENABLED(CONFIG_IPV6)
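/* inet6addr notifier: mirror IPv6 address additions and removals on our net
 * devices (or on a bonding master above them) into the adapter's CLIP table.
 */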
4094 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4095 unsigned long event, void *data)
4096 {
4097 struct inet6_ifaddr *ifa = data;
4098 struct net_device *event_dev = ifa->idev->dev;
4099 const struct device *parent = NULL;
4100 #if IS_ENABLED(CONFIG_BONDING)
4101 struct adapter *adap;
4102 #endif
4103 if (event_dev->priv_flags & IFF_802_1Q_VLAN)
4104 event_dev = vlan_dev_real_dev(event_dev);
4105 #if IS_ENABLED(CONFIG_BONDING)
4106 if (event_dev->flags & IFF_MASTER) {
4107 list_for_each_entry(adap, &adapter_list, list_node) {
4108 switch (event) {
4109 case NETDEV_UP:
4110 cxgb4_clip_get(adap->port[0],
4111 (const u32 *)ifa, 1);
4112 break;
4113 case NETDEV_DOWN:
4114 cxgb4_clip_release(adap->port[0],
4115 (const u32 *)ifa, 1);
4116 break;
4117 default:
4118 break;
4119 }
4120 }
4121 return NOTIFY_OK;
4122 }
4123 #endif
4124
4125 if (event_dev)
4126 parent = event_dev->dev.parent;
4127
4128 if (parent && parent->driver == &cxgb4_driver.driver) {
4129 switch (event) {
4130 case NETDEV_UP:
4131 cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
4132 break;
4133 case NETDEV_DOWN:
4134 cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
4135 break;
4136 default:
4137 break;
4138 }
4139 }
4140 return NOTIFY_OK;
4141 }
4142
4143 static bool inet6addr_registered;
4144 static struct notifier_block cxgb4_inet6addr_notifier = {
4145 .notifier_call = cxgb4_inet6addr_handler
4146 };
4147
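/* Re-sync the CLIP table entries (local IPv6 addresses) for every port's
 * root device; called once the adapter has been fully brought up.
 */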
4148 static void update_clip(const struct adapter *adap)
4149 {
4150 int i;
4151 struct net_device *dev;
4152 int ret;
4153
4154 rcu_read_lock();
4155
4156 for (i = 0; i < MAX_NPORTS; i++) {
4157 dev = adap->port[i];
4158 ret = 0;
4159
4160 if (dev)
4161 ret = cxgb4_update_root_dev_clip(dev);
4162
4163 if (ret < 0)
4164 break;
4165 }
4166 rcu_read_unlock();
4167 }
4168 #endif /* IS_ENABLED(CONFIG_IPV6) */
4169
4170 /**
4171 * cxgb_up - enable the adapter
4172 * @adap: adapter being enabled
4173 *
4174 * Called when the first port is enabled, this function performs the
4175 * actions necessary to make an adapter operational, such as completing
4176 * the initialization of HW modules, and enabling interrupts.
4177 *
4178 * Must be called with the rtnl lock held.
4179 */
4180 static int cxgb_up(struct adapter *adap)
4181 {
4182 int err;
4183
4184 err = setup_sge_queues(adap);
4185 if (err)
4186 goto out;
4187 err = setup_rss(adap);
4188 if (err)
4189 goto freeq;
4190
4191 if (adap->flags & USING_MSIX) {
4192 name_msix_vecs(adap);
4193 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4194 adap->msix_info[0].desc, adap);
4195 if (err)
4196 goto irq_err;
4197
4198 err = request_msix_queue_irqs(adap);
4199 if (err) {
4200 free_irq(adap->msix_info[0].vec, adap);
4201 goto irq_err;
4202 }
4203 } else {
4204 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4205 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4206 adap->port[0]->name, adap);
4207 if (err)
4208 goto irq_err;
4209 }
4210 enable_rx(adap);
4211 t4_sge_start(adap);
4212 t4_intr_enable(adap);
4213 adap->flags |= FULL_INIT_DONE;
4214 notify_ulds(adap, CXGB4_STATE_UP);
4215 #if IS_ENABLED(CONFIG_IPV6)
4216 update_clip(adap);
4217 #endif
4218 out:
4219 return err;
4220 irq_err:
4221 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4222 freeq:
4223 t4_free_sge_resources(adap);
4224 goto out;
4225 }
4226
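/* Undo cxgb_up(): disable interrupts, flush deferred work, release IRQs and
 * free all SGE resources.
 */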
4227 static void cxgb_down(struct adapter *adapter)
4228 {
4229 t4_intr_disable(adapter);
4230 cancel_work_sync(&adapter->tid_release_task);
4231 cancel_work_sync(&adapter->db_full_task);
4232 cancel_work_sync(&adapter->db_drop_task);
4233 adapter->tid_release_task_busy = false;
4234 adapter->tid_release_head = NULL;
4235
4236 if (adapter->flags & USING_MSIX) {
4237 free_msix_queue_irqs(adapter);
4238 free_irq(adapter->msix_info[0].vec, adapter);
4239 } else
4240 free_irq(adapter->pdev->irq, adapter);
4241 quiesce_rx(adapter);
4242 t4_sge_stop(adapter);
4243 t4_free_sge_resources(adapter);
4244 adapter->flags &= ~FULL_INIT_DONE;
4245 }
4246
4247 /*
4248 * net_device operations
4249 */
4250 static int cxgb_open(struct net_device *dev)
4251 {
4252 int err;
4253 struct port_info *pi = netdev_priv(dev);
4254 struct adapter *adapter = pi->adapter;
4255
4256 netif_carrier_off(dev);
4257
4258 if (!(adapter->flags & FULL_INIT_DONE)) {
4259 err = cxgb_up(adapter);
4260 if (err < 0)
4261 return err;
4262 }
4263
4264 err = link_start(dev);
4265 if (!err)
4266 netif_tx_start_all_queues(dev);
4267 return err;
4268 }
4269
4270 static int cxgb_close(struct net_device *dev)
4271 {
4272 struct port_info *pi = netdev_priv(dev);
4273 struct adapter *adapter = pi->adapter;
4274
4275 netif_tx_stop_all_queues(dev);
4276 netif_carrier_off(dev);
4277 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4278 }
4279
4280 /* Return an error number if the indicated filter isn't writable ...
4281 */
4282 static int writable_filter(struct filter_entry *f)
4283 {
4284 if (f->locked)
4285 return -EPERM;
4286 if (f->pending)
4287 return -EBUSY;
4288
4289 return 0;
4290 }
4291
4292 /* Delete the filter at the specified index (if valid). This checks for all
4293 * the common problems with doing this, such as the filter being locked or
4294 * currently pending in another operation.
4295 */
4296 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4297 {
4298 struct filter_entry *f;
4299 int ret;
4300
4301 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4302 return -EINVAL;
4303
4304 f = &adapter->tids.ftid_tab[fidx];
4305 ret = writable_filter(f);
4306 if (ret)
4307 return ret;
4308 if (f->valid)
4309 return del_filter_wr(adapter, fidx);
4310
4311 return 0;
4312 }
4313
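/* Install a "server filter" that steers incoming connection requests
 * matching the given local IP/port (and, optionally, ingress port) to the
 * specified ingress queue. The server TID is translated into an index in
 * the part of the filter table reserved for server filters.
 */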
4314 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4315 __be32 sip, __be16 sport, __be16 vlan,
4316 unsigned int queue, unsigned char port, unsigned char mask)
4317 {
4318 int ret;
4319 struct filter_entry *f;
4320 struct adapter *adap;
4321 int i;
4322 u8 *val;
4323
4324 adap = netdev2adap(dev);
4325
4326 /* Adjust stid to correct filter index */
4327 stid -= adap->tids.sftid_base;
4328 stid += adap->tids.nftids;
4329
4330 /* Check to make sure the filter requested is writable ...
4331 */
4332 f = &adap->tids.ftid_tab[stid];
4333 ret = writable_filter(f);
4334 if (ret)
4335 return ret;
4336
4337 /* Clear out any old resources being used by the filter before
4338 * we start constructing the new filter.
4339 */
4340 if (f->valid)
4341 clear_filter(adap, f);
4342
4343 /* Clear out filter specifications */
4344 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4345 f->fs.val.lport = cpu_to_be16(sport);
4346 f->fs.mask.lport = ~0;
4347 val = (u8 *)&sip;
4348 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4349 for (i = 0; i < 4; i++) {
4350 f->fs.val.lip[i] = val[i];
4351 f->fs.mask.lip[i] = ~0;
4352 }
4353 if (adap->params.tp.vlan_pri_map & PORT_F) {
4354 f->fs.val.iport = port;
4355 f->fs.mask.iport = mask;
4356 }
4357 }
4358
4359 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
4360 f->fs.val.proto = IPPROTO_TCP;
4361 f->fs.mask.proto = ~0;
4362 }
4363
4364 f->fs.dirsteer = 1;
4365 f->fs.iq = queue;
4366 /* Mark filter as locked */
4367 f->locked = 1;
4368 f->fs.rpttid = 1;
4369
4370 ret = set_filter_wr(adap, stid);
4371 if (ret) {
4372 clear_filter(adap, f);
4373 return ret;
4374 }
4375
4376 return 0;
4377 }
4378 EXPORT_SYMBOL(cxgb4_create_server_filter);
4379
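/* Remove a server filter previously installed with
 * cxgb4_create_server_filter().
 */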
4380 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4381 unsigned int queue, bool ipv6)
4382 {
4383 int ret;
4384 struct filter_entry *f;
4385 struct adapter *adap;
4386
4387 adap = netdev2adap(dev);
4388
4389 /* Adjust stid to correct filter index */
4390 stid -= adap->tids.sftid_base;
4391 stid += adap->tids.nftids;
4392
4393 f = &adap->tids.ftid_tab[stid];
4394 /* Unlock the filter */
4395 f->locked = 0;
4396
4397 ret = delete_filter(adap, stid);
4398 if (ret)
4399 return ret;
4400
4401 return 0;
4402 }
4403 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4404
4405 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4406 struct rtnl_link_stats64 *ns)
4407 {
4408 struct port_stats stats;
4409 struct port_info *p = netdev_priv(dev);
4410 struct adapter *adapter = p->adapter;
4411
4412 /* Block retrieving statistics during EEH error
4413 * recovery. Otherwise, the recovery might fail
4414 * and the PCI device will be removed permanently
4415 */
4416 spin_lock(&adapter->stats_lock);
4417 if (!netif_device_present(dev)) {
4418 spin_unlock(&adapter->stats_lock);
4419 return ns;
4420 }
4421 t4_get_port_stats(adapter, p->tx_chan, &stats);
4422 spin_unlock(&adapter->stats_lock);
4423
4424 ns->tx_bytes = stats.tx_octets;
4425 ns->tx_packets = stats.tx_frames;
4426 ns->rx_bytes = stats.rx_octets;
4427 ns->rx_packets = stats.rx_frames;
4428 ns->multicast = stats.rx_mcast_frames;
4429
4430 /* detailed rx_errors */
4431 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4432 stats.rx_runt;
4433 ns->rx_over_errors = 0;
4434 ns->rx_crc_errors = stats.rx_fcs_err;
4435 ns->rx_frame_errors = stats.rx_symbol_err;
4436 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4437 stats.rx_ovflow2 + stats.rx_ovflow3 +
4438 stats.rx_trunc0 + stats.rx_trunc1 +
4439 stats.rx_trunc2 + stats.rx_trunc3;
4440 ns->rx_missed_errors = 0;
4441
4442 /* detailed tx_errors */
4443 ns->tx_aborted_errors = 0;
4444 ns->tx_carrier_errors = 0;
4445 ns->tx_fifo_errors = 0;
4446 ns->tx_heartbeat_errors = 0;
4447 ns->tx_window_errors = 0;
4448
4449 ns->tx_errors = stats.tx_error_frames;
4450 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4451 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4452 return ns;
4453 }
4454
4455 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4456 {
4457 unsigned int mbox;
4458 int ret = 0, prtad, devad;
4459 struct port_info *pi = netdev_priv(dev);
4460 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4461
4462 switch (cmd) {
4463 case SIOCGMIIPHY:
4464 if (pi->mdio_addr < 0)
4465 return -EOPNOTSUPP;
4466 data->phy_id = pi->mdio_addr;
4467 break;
4468 case SIOCGMIIREG:
4469 case SIOCSMIIREG:
4470 if (mdio_phy_id_is_c45(data->phy_id)) {
4471 prtad = mdio_phy_id_prtad(data->phy_id);
4472 devad = mdio_phy_id_devad(data->phy_id);
4473 } else if (data->phy_id < 32) {
4474 prtad = data->phy_id;
4475 devad = 0;
4476 data->reg_num &= 0x1f;
4477 } else
4478 return -EINVAL;
4479
4480 mbox = pi->adapter->fn;
4481 if (cmd == SIOCGMIIREG)
4482 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4483 data->reg_num, &data->val_out);
4484 else
4485 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4486 data->reg_num, data->val_in);
4487 break;
4488 default:
4489 return -EOPNOTSUPP;
4490 }
4491 return ret;
4492 }
4493
4494 static void cxgb_set_rxmode(struct net_device *dev)
4495 {
4496 /* unfortunately we can't return errors to the stack */
4497 set_rxmode(dev, -1, false);
4498 }
4499
4500 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4501 {
4502 int ret;
4503 struct port_info *pi = netdev_priv(dev);
4504
4505 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4506 return -EINVAL;
4507 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4508 -1, -1, -1, true);
4509 if (!ret)
4510 dev->mtu = new_mtu;
4511 return ret;
4512 }
4513
4514 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4515 {
4516 int ret;
4517 struct sockaddr *addr = p;
4518 struct port_info *pi = netdev_priv(dev);
4519
4520 if (!is_valid_ether_addr(addr->sa_data))
4521 return -EADDRNOTAVAIL;
4522
4523 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4524 pi->xact_addr_filt, addr->sa_data, true, true);
4525 if (ret < 0)
4526 return ret;
4527
4528 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4529 pi->xact_addr_filt = ret;
4530 return 0;
4531 }
4532
4533 #ifdef CONFIG_NET_POLL_CONTROLLER
4534 static void cxgb_netpoll(struct net_device *dev)
4535 {
4536 struct port_info *pi = netdev_priv(dev);
4537 struct adapter *adap = pi->adapter;
4538
4539 if (adap->flags & USING_MSIX) {
4540 int i;
4541 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4542
4543 for (i = pi->nqsets; i; i--, rx++)
4544 t4_sge_intr_msix(0, &rx->rspq);
4545 } else
4546 t4_intr_handler(adap)(0, adap);
4547 }
4548 #endif
4549
4550 static const struct net_device_ops cxgb4_netdev_ops = {
4551 .ndo_open = cxgb_open,
4552 .ndo_stop = cxgb_close,
4553 .ndo_start_xmit = t4_eth_xmit,
4554 .ndo_select_queue = cxgb_select_queue,
4555 .ndo_get_stats64 = cxgb_get_stats,
4556 .ndo_set_rx_mode = cxgb_set_rxmode,
4557 .ndo_set_mac_address = cxgb_set_mac_addr,
4558 .ndo_set_features = cxgb_set_features,
4559 .ndo_validate_addr = eth_validate_addr,
4560 .ndo_do_ioctl = cxgb_ioctl,
4561 .ndo_change_mtu = cxgb_change_mtu,
4562 #ifdef CONFIG_NET_POLL_CONTROLLER
4563 .ndo_poll_controller = cxgb_netpoll,
4564 #endif
4565 };
4566
4567 void t4_fatal_err(struct adapter *adap)
4568 {
4569 t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
4570 t4_intr_disable(adap);
4571 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4572 }
4573
4574 /* Return the specified PCI-E Configuration Space register from our Physical
4575 * Function. We try first via a Firmware LDST Command since we prefer to let
4576 * the firmware own all of these registers, but if that fails we go for it
4577 * directly ourselves.
4578 */
4579 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4580 {
4581 struct fw_ldst_cmd ldst_cmd;
4582 u32 val;
4583 int ret;
4584
4585 /* Construct and send the Firmware LDST Command to retrieve the
4586 * specified PCI-E Configuration Space register.
4587 */
4588 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4589 ldst_cmd.op_to_addrspace =
4590 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
4591 FW_CMD_REQUEST_F |
4592 FW_CMD_READ_F |
4593 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
4594 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4595 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
4596 ldst_cmd.u.pcie.ctrl_to_fn =
4597 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
4598 ldst_cmd.u.pcie.r = reg;
4599 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4600 &ldst_cmd);
4601
4602 /* If the LDST Command succeeded, extract the returned register
4603 * value. Otherwise read it directly ourselves.
4604 */
4605 if (ret == 0)
4606 val = ntohl(ldst_cmd.u.pcie.data[0]);
4607 else
4608 t4_hw_pci_read_cfg4(adap, reg, &val);
4609
4610 return val;
4611 }
4612
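/* Program the PCI-E memory window decoders used for indirect access to
 * adapter memory. On T4 the windows are programmed with absolute addresses
 * derived from BAR0; on T5+ only the offset within the BAR is passed.
 */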
4613 static void setup_memwin(struct adapter *adap)
4614 {
4615 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4616
4617 if (is_t4(adap->params.chip)) {
4618 u32 bar0;
4619
4620 /* Truncation intentional: we only read the bottom 32-bits of
4621 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4622 * mechanism to read BAR0 instead of using
4623 * pci_resource_start() because we could be operating from
4624 * within a Virtual Machine which is trapping our accesses to
4625 * our Configuration Space and we need to set up the PCI-E
4626 * Memory Window decoders with the actual addresses which will
4627 * be coming across the PCI-E link.
4628 */
4629 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4630 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4631 adap->t4_bar0 = bar0;
4632
4633 mem_win0_base = bar0 + MEMWIN0_BASE;
4634 mem_win1_base = bar0 + MEMWIN1_BASE;
4635 mem_win2_base = bar0 + MEMWIN2_BASE;
4636 mem_win2_aperture = MEMWIN2_APERTURE;
4637 } else {
4638 /* For T5, only relative offset inside the PCIe BAR is passed */
4639 mem_win0_base = MEMWIN0_BASE;
4640 mem_win1_base = MEMWIN1_BASE;
4641 mem_win2_base = MEMWIN2_BASE_T5;
4642 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4643 }
4644 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
4645 mem_win0_base | BIR_V(0) |
4646 WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
4647 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
4648 mem_win1_base | BIR_V(0) |
4649 WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
4650 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
4651 mem_win2_base | BIR_V(0) |
4652 WINDOW_V(ilog2(mem_win2_aperture) - 10));
4653 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
4654 }
4655
4656 static void setup_memwin_rdma(struct adapter *adap)
4657 {
4658 if (adap->vres.ocq.size) {
4659 u32 start;
4660 unsigned int sz_kb;
4661
4662 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4663 start &= PCI_BASE_ADDRESS_MEM_MASK;
4664 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4665 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4666 t4_write_reg(adap,
4667 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
4668 start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
4669 t4_write_reg(adap,
4670 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
4671 adap->vres.ocq.start);
4672 t4_read_reg(adap,
4673 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
4674 }
4675 }
4676
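/* Basic adapter (re)initialization via direct firmware commands, used on the
 * EEH slot-reset recovery path: query and select device capabilities,
 * configure global RSS and PF/VF resources, apply a few TP tweaks and
 * perform early firmware initialization.
 */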
4677 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4678 {
4679 u32 v;
4680 int ret;
4681
4682 /* get device capabilities */
4683 memset(c, 0, sizeof(*c));
4684 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4685 FW_CMD_REQUEST_F | FW_CMD_READ_F);
4686 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4687 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4688 if (ret < 0)
4689 return ret;
4690
4691 /* select capabilities we'll be using */
4692 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4693 if (!vf_acls)
4694 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4695 else
4696 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4697 } else if (vf_acls) {
4698 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4699 return ret;
4700 }
4701 c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4702 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4703 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4704 if (ret < 0)
4705 return ret;
4706
4707 ret = t4_config_glbl_rss(adap, adap->fn,
4708 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4709 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4710 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
4711 if (ret < 0)
4712 return ret;
4713
4714 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4715 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4716 if (ret < 0)
4717 return ret;
4718
4719 t4_sge_init(adap);
4720
4721 /* tweak some settings */
4722 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
4723 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4724 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4725 v = t4_read_reg(adap, TP_PIO_DATA_A);
4726 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
4727
4728 /* first 4 Tx modulation queues point to consecutive Tx channels */
4729 adap->params.tp.tx_modq_map = 0xE4;
4730 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4731 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4732
4733 /* associate each Tx modulation queue with consecutive Tx channels */
4734 v = 0x84218421;
4735 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4736 &v, 1, TP_TX_SCHED_HDR_A);
4737 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4738 &v, 1, TP_TX_SCHED_FIFO_A);
4739 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4740 &v, 1, TP_TX_SCHED_PCMD_A);
4741
4742 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4743 if (is_offload(adap)) {
4744 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4745 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4746 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4747 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4748 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4749 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4750 TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4751 TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4752 TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4753 TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4754 }
4755
4756 /* get basic stuff going */
4757 return t4_early_init(adap, adap->fn);
4758 }
4759
4760 /*
4761 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4762 */
4763 #define MAX_ATIDS 8192U
4764
4765 /*
4766 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4767 *
4768 * If the firmware we're dealing with has Configuration File support, then
4769 * we use that to perform all configuration.
4770 */
4771
4772 /*
4773 * Tweak configuration based on module parameters, etc. Most of these have
4774 * defaults assigned to them by Firmware Configuration Files (if we're using
4775 * them) but need to be explicitly set if we're using hard-coded
4776 * initialization. But even in the case of using Firmware Configuration
4777 * Files, we'd like to expose the ability to change these via module
4778 * parameters so these are essentially common tweaks/settings for
4779 * Configuration Files and hard-coded initialization ...
4780 */
4781 static int adap_init0_tweaks(struct adapter *adapter)
4782 {
4783 /*
4784 * Fix up various Host-Dependent Parameters like Page Size, Cache
4785 * Line Size, etc. The firmware default is for a 4KB Page Size and
4786 * 64B Cache Line Size ...
4787 */
4788 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4789
4790 /*
4791 * Process module parameters which affect early initialization.
4792 */
4793 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4794 dev_err(&adapter->pdev->dev,
4795 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4796 rx_dma_offset);
4797 rx_dma_offset = 2;
4798 }
4799 t4_set_reg_field(adapter, SGE_CONTROL_A,
4800 PKTSHIFT_V(PKTSHIFT_M),
4801 PKTSHIFT_V(rx_dma_offset));
4802
4803 /*
4804 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4805 * adds the pseudo header itself.
4806 */
4807 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
4808 CSUM_HAS_PSEUDO_HDR_F, 0);
4809
4810 return 0;
4811 }
4812
4813 /*
4814 * Attempt to initialize the adapter via a Firmware Configuration File.
4815 */
4816 static int adap_init0_config(struct adapter *adapter, int reset)
4817 {
4818 struct fw_caps_config_cmd caps_cmd;
4819 const struct firmware *cf;
4820 unsigned long mtype = 0, maddr = 0;
4821 u32 finiver, finicsum, cfcsum;
4822 int ret;
4823 int config_issued = 0;
4824 char *fw_config_file, fw_config_file_path[256];
4825 char *config_name = NULL;
4826
4827 /*
4828 * Reset device if necessary.
4829 */
4830 if (reset) {
4831 ret = t4_fw_reset(adapter, adapter->mbox,
4832 PIORSTMODE_F | PIORST_F);
4833 if (ret < 0)
4834 goto bye;
4835 }
4836
4837 /*
4838 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4839 * then use that. Otherwise, use the configuration file stored
4840 * in the adapter flash ...
4841 */
4842 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4843 case CHELSIO_T4:
4844 fw_config_file = FW4_CFNAME;
4845 break;
4846 case CHELSIO_T5:
4847 fw_config_file = FW5_CFNAME;
4848 break;
4849 default:
4850 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4851 adapter->pdev->device);
4852 ret = -EINVAL;
4853 goto bye;
4854 }
4855
4856 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4857 if (ret < 0) {
4858 config_name = "On FLASH";
4859 mtype = FW_MEMTYPE_CF_FLASH;
4860 maddr = t4_flash_cfg_addr(adapter);
4861 } else {
4862 u32 params[7], val[7];
4863
4864 sprintf(fw_config_file_path,
4865 "/lib/firmware/%s", fw_config_file);
4866 config_name = fw_config_file_path;
4867
4868 if (cf->size >= FLASH_CFG_MAX_SIZE)
4869 ret = -ENOMEM;
4870 else {
4871 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4872 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4873 ret = t4_query_params(adapter, adapter->mbox,
4874 adapter->fn, 0, 1, params, val);
4875 if (ret == 0) {
4876 /*
4877 * For t4_memory_rw() below addresses and
4878 * sizes have to be in terms of multiples of 4
4879 * bytes. So, if the Configuration File isn't
4880 * a multiple of 4 bytes in length we'll have
4881 * to write that out separately since we can't
4882 * guarantee that the bytes following the
4883 * residual byte in the buffer returned by
4884 * request_firmware() are zeroed out ...
4885 */
4886 size_t resid = cf->size & 0x3;
4887 size_t size = cf->size & ~0x3;
4888 __be32 *data = (__be32 *)cf->data;
4889
4890 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4891 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
4892
4893 spin_lock(&adapter->win0_lock);
4894 ret = t4_memory_rw(adapter, 0, mtype, maddr,
4895 size, data, T4_MEMORY_WRITE);
4896 if (ret == 0 && resid != 0) {
4897 union {
4898 __be32 word;
4899 char buf[4];
4900 } last;
4901 int i;
4902
4903 last.word = data[size >> 2];
4904 for (i = resid; i < 4; i++)
4905 last.buf[i] = 0;
4906 ret = t4_memory_rw(adapter, 0, mtype,
4907 maddr + size,
4908 4, &last.word,
4909 T4_MEMORY_WRITE);
4910 }
4911 spin_unlock(&adapter->win0_lock);
4912 }
4913 }
4914
4915 release_firmware(cf);
4916 if (ret)
4917 goto bye;
4918 }
4919
4920 /*
4921 * Issue a Capability Configuration command to the firmware to get it
4922 * to parse the Configuration File. We don't use t4_fw_config_file()
4923 * because we want the ability to modify various features after we've
4924 * processed the configuration file ...
4925 */
4926 memset(&caps_cmd, 0, sizeof(caps_cmd));
4927 caps_cmd.op_to_write =
4928 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4929 FW_CMD_REQUEST_F |
4930 FW_CMD_READ_F);
4931 caps_cmd.cfvalid_to_len16 =
4932 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4933 FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4934 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
4935 FW_LEN16(caps_cmd));
4936 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4937 &caps_cmd);
4938
4939 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4940 * Configuration File in FLASH), our last gasp effort is to use the
4941 * Firmware Configuration File which is embedded in the firmware. A
4942 * very few early versions of the firmware didn't have one embedded
4943 * but we can ignore those.
4944 */
4945 if (ret == -ENOENT) {
4946 memset(&caps_cmd, 0, sizeof(caps_cmd));
4947 caps_cmd.op_to_write =
4948 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4949 FW_CMD_REQUEST_F |
4950 FW_CMD_READ_F);
4951 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4952 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4953 sizeof(caps_cmd), &caps_cmd);
4954 config_name = "Firmware Default";
4955 }
4956
4957 config_issued = 1;
4958 if (ret < 0)
4959 goto bye;
4960
4961 finiver = ntohl(caps_cmd.finiver);
4962 finicsum = ntohl(caps_cmd.finicsum);
4963 cfcsum = ntohl(caps_cmd.cfcsum);
4964 if (finicsum != cfcsum)
4965 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4966 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4967 finicsum, cfcsum);
4968
4969 /*
4970 * And now tell the firmware to use the configuration we just loaded.
4971 */
4972 caps_cmd.op_to_write =
4973 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4974 FW_CMD_REQUEST_F |
4975 FW_CMD_WRITE_F);
4976 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4977 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4978 NULL);
4979 if (ret < 0)
4980 goto bye;
4981
4982 /*
4983 * Tweak configuration based on system architecture, module
4984 * parameters, etc.
4985 */
4986 ret = adap_init0_tweaks(adapter);
4987 if (ret < 0)
4988 goto bye;
4989
4990 /*
4991 * And finally tell the firmware to initialize itself using the
4992 * parameters from the Configuration File.
4993 */
4994 ret = t4_fw_initialize(adapter, adapter->mbox);
4995 if (ret < 0)
4996 goto bye;
4997
4998 /* Emit Firmware Configuration File information and return
4999 * successfully.
5000 */
5001 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5002 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5003 config_name, finiver, cfcsum);
5004 return 0;
5005
5006 /*
5007 * Something bad happened. Return the error ... (If the "error"
5008 * is that there's no Configuration File on the adapter we don't
5009 * want to issue a warning since this is fairly common.)
5010 */
5011 bye:
5012 if (config_issued && ret != -ENOENT)
5013 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5014 config_name, -ret);
5015 return ret;
5016 }
5017
5018 static struct fw_info fw_info_array[] = {
5019 {
5020 .chip = CHELSIO_T4,
5021 .fs_name = FW4_CFNAME,
5022 .fw_mod_name = FW4_FNAME,
5023 .fw_hdr = {
5024 .chip = FW_HDR_CHIP_T4,
5025 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5026 .intfver_nic = FW_INTFVER(T4, NIC),
5027 .intfver_vnic = FW_INTFVER(T4, VNIC),
5028 .intfver_ri = FW_INTFVER(T4, RI),
5029 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5030 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5031 },
5032 }, {
5033 .chip = CHELSIO_T5,
5034 .fs_name = FW5_CFNAME,
5035 .fw_mod_name = FW5_FNAME,
5036 .fw_hdr = {
5037 .chip = FW_HDR_CHIP_T5,
5038 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5039 .intfver_nic = FW_INTFVER(T5, NIC),
5040 .intfver_vnic = FW_INTFVER(T5, VNIC),
5041 .intfver_ri = FW_INTFVER(T5, RI),
5042 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5043 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5044 },
5045 }
5046 };
5047
5048 static struct fw_info *find_fw_info(int chip)
5049 {
5050 int i;
5051
5052 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5053 if (fw_info_array[i].chip == chip)
5054 return &fw_info_array[i];
5055 }
5056 return NULL;
5057 }
5058
5059 /*
5060 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5061 */
5062 static int adap_init0(struct adapter *adap)
5063 {
5064 int ret;
5065 u32 v, port_vec;
5066 enum dev_state state;
5067 u32 params[7], val[7];
5068 struct fw_caps_config_cmd caps_cmd;
5069 struct fw_devlog_cmd devlog_cmd;
5070 u32 devlog_meminfo;
5071 int reset = 1;
5072
5073 /* Contact FW, advertising Master capability */
5074 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
5075 if (ret < 0) {
5076 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5077 ret);
5078 return ret;
5079 }
5080 if (ret == adap->mbox)
5081 adap->flags |= MASTER_PF;
5082
5083 /*
5084 * If we're the Master PF Driver and the device is uninitialized,
5085 * then let's consider upgrading the firmware ... (We always want
5086 * to check the firmware version number in order to A. get it for
5087 * later reporting and B. to warn if the currently loaded firmware
5088 * is excessively mismatched relative to the driver.)
5089 */
5090 t4_get_fw_version(adap, &adap->params.fw_vers);
5091 t4_get_tp_version(adap, &adap->params.tp_vers);
5092 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5093 struct fw_info *fw_info;
5094 struct fw_hdr *card_fw;
5095 const struct firmware *fw;
5096 const u8 *fw_data = NULL;
5097 unsigned int fw_size = 0;
5098
5099 /* This is the firmware whose headers the driver was compiled
5100 * against
5101 */
5102 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5103 if (fw_info == NULL) {
5104 dev_err(adap->pdev_dev,
5105 "unable to get firmware info for chip %d.\n",
5106 CHELSIO_CHIP_VERSION(adap->params.chip));
5107 return -EINVAL;
5108 }
5109
5110 /* allocate memory to read the header of the firmware on the
5111 * card
5112 */
5113 card_fw = t4_alloc_mem(sizeof(*card_fw));
5114
5115 /* Get FW from /lib/firmware/ */
5116 ret = request_firmware(&fw, fw_info->fw_mod_name,
5117 adap->pdev_dev);
5118 if (ret < 0) {
5119 dev_err(adap->pdev_dev,
5120 "unable to load firmware image %s, error %d\n",
5121 fw_info->fw_mod_name, ret);
5122 } else {
5123 fw_data = fw->data;
5124 fw_size = fw->size;
5125 }
5126
5127 /* upgrade FW logic */
5128 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5129 state, &reset);
5130
5131 /* Cleaning up */
5132 if (fw != NULL)
5133 release_firmware(fw);
5134 t4_free_mem(card_fw);
5135
5136 if (ret < 0)
5137 goto bye;
5138 }
5139
5140 /*
5141 * Grab VPD parameters. This should be done after we establish a
5142 * connection to the firmware since some of the VPD parameters
5143 * (notably the Core Clock frequency) are retrieved via requests to
5144 * the firmware. On the other hand, we need these fairly early on
5145 * so we do this right after getting ahold of the firmware.
5146 */
5147 ret = get_vpd_params(adap, &adap->params.vpd);
5148 if (ret < 0)
5149 goto bye;
5150
5151 /* Read firmware device log parameters. We really need to find a way
5152 * to get these parameters initialized with some default values (which
5153 * are likely to be correct) for the case where we either don't
5154 * attach to the firmware or it has crashed when we probe the adapter.
5155 * That way we'll still be able to perform early firmware startup
5156 * debugging ... If the request to get the Firmware's Device Log
5157 * parameters fails, we'll live so we don't make that a fatal error.
5158 */
5159 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
5160 devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
5161 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5162 devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
5163 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
5164 &devlog_cmd);
5165 if (ret == 0) {
5166 devlog_meminfo =
5167 ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
5168 adap->params.devlog.memtype =
5169 FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
5170 adap->params.devlog.start =
5171 FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
5172 adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
5173 }
5174
5175 /*
5176 * Find out what ports are available to us. Note that we need to do
5177 * this before calling adap_init0_no_config() since it needs nports
5178 * and portvec ...
5179 */
5180 v =
5181 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5182 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
5183 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5184 if (ret < 0)
5185 goto bye;
5186
5187 adap->params.nports = hweight32(port_vec);
5188 adap->params.portvec = port_vec;
5189
5190 /* If the firmware is initialized already, emit a simple note to that
5191 * effect. Otherwise, it's time to try initializing the adapter.
5192 */
5193 if (state == DEV_STATE_INIT) {
5194 dev_info(adap->pdev_dev, "Coming up as %s: "\
5195 "Adapter already initialized\n",
5196 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5197 } else {
5198 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5199 "Initializing adapter\n");
5200
5201 /* Find out whether we're dealing with a version of the
5202 * firmware which has configuration file support.
5203 */
5204 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
5205 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
5206 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5207 params, val);
5208
5209 /* If the firmware doesn't support Configuration Files,
5210 * return an error.
5211 */
5212 if (ret < 0) {
5213 dev_err(adap->pdev_dev, "firmware doesn't support "
5214 "Firmware Configuration Files\n");
5215 goto bye;
5216 }
5217
5218 /* The firmware provides us with a memory buffer where we can
5219 * load a Configuration File from the host if we want to
5220 * override the Configuration File in flash.
5221 */
5222 ret = adap_init0_config(adap, reset);
5223 if (ret == -ENOENT) {
5224 dev_err(adap->pdev_dev, "no Configuration File "
5225 "present on adapter.\n");
5226 goto bye;
5227 }
5228 if (ret < 0) {
5229 dev_err(adap->pdev_dev, "could not initialize "
5230 "adapter, error %d\n", -ret);
5231 goto bye;
5232 }
5233 }
5234
5235 /* Give the SGE code a chance to pull in anything that it needs ...
5236 * Note that this must be called after we retrieve our VPD parameters
5237 * in order to know how to convert core ticks to seconds, etc.
5238 */
5239 ret = t4_sge_init(adap);
5240 if (ret < 0)
5241 goto bye;
5242
5243 if (is_bypass_device(adap->pdev->device))
5244 adap->params.bypass = 1;
5245
5246 /*
5247 * Grab some of our basic fundamental operating parameters.
5248 */
5249 #define FW_PARAM_DEV(param) \
5250 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
5251 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
5252
5253 #define FW_PARAM_PFVF(param) \
5254 FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
5255 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
5256 FW_PARAMS_PARAM_Y_V(0) | \
5257 FW_PARAMS_PARAM_Z_V(0)
5258
5259 params[0] = FW_PARAM_PFVF(EQ_START);
5260 params[1] = FW_PARAM_PFVF(L2T_START);
5261 params[2] = FW_PARAM_PFVF(L2T_END);
5262 params[3] = FW_PARAM_PFVF(FILTER_START);
5263 params[4] = FW_PARAM_PFVF(FILTER_END);
5264 params[5] = FW_PARAM_PFVF(IQFLINT_START);
5265 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5266 if (ret < 0)
5267 goto bye;
5268 adap->sge.egr_start = val[0];
5269 adap->l2t_start = val[1];
5270 adap->l2t_end = val[2];
5271 adap->tids.ftid_base = val[3];
5272 adap->tids.nftids = val[4] - val[3] + 1;
5273 adap->sge.ingr_start = val[5];
5274
5275 params[0] = FW_PARAM_PFVF(CLIP_START);
5276 params[1] = FW_PARAM_PFVF(CLIP_END);
5277 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5278 if (ret < 0)
5279 goto bye;
5280 adap->clipt_start = val[0];
5281 adap->clipt_end = val[1];
5282
5283 /* query params related to active filter region */
5284 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5285 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5286 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5287 /* If the active filter region is non-empty, enable establishing
5288 * offload connections through firmware work requests
5289 */
5290 if ((val[0] != val[1]) && (ret >= 0)) {
5291 adap->flags |= FW_OFLD_CONN;
5292 adap->tids.aftid_base = val[0];
5293 adap->tids.aftid_end = val[1];
5294 }
5295
5296 /* If we're running on newer firmware, let it know that we're
5297 * prepared to deal with encapsulated CPL messages. Older
5298 * firmware won't understand this and we'll just get
5299 * unencapsulated messages ...
5300 */
5301 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5302 val[0] = 1;
5303 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5304
5305 /*
5306 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5307 * capability. Earlier versions of the firmware didn't have the
5308 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5309 * permission to use ULPTX MEMWRITE DSGL.
5310 */
5311 if (is_t4(adap->params.chip)) {
5312 adap->params.ulptx_memwrite_dsgl = false;
5313 } else {
5314 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5315 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5316 1, params, val);
5317 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5318 }
5319
5320 /*
5321 * Get device capabilities so we can determine what resources we need
5322 * to manage.
5323 */
5324 memset(&caps_cmd, 0, sizeof(caps_cmd));
5325 caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5326 FW_CMD_REQUEST_F | FW_CMD_READ_F);
5327 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5328 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5329 &caps_cmd);
5330 if (ret < 0)
5331 goto bye;
5332
5333 if (caps_cmd.ofldcaps) {
5334 /* query offload-related parameters */
5335 params[0] = FW_PARAM_DEV(NTID);
5336 params[1] = FW_PARAM_PFVF(SERVER_START);
5337 params[2] = FW_PARAM_PFVF(SERVER_END);
5338 params[3] = FW_PARAM_PFVF(TDDP_START);
5339 params[4] = FW_PARAM_PFVF(TDDP_END);
5340 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5341 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5342 params, val);
5343 if (ret < 0)
5344 goto bye;
5345 adap->tids.ntids = val[0];
5346 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5347 adap->tids.stid_base = val[1];
5348 adap->tids.nstids = val[2] - val[1] + 1;
5349 /*
5350 * Set up the server filter region. Divide the available filter
5351 * region into two parts: regular filters get 1/3rd and server
5352 * filters get 2/3rds. This is only enabled if the workaround
5353 * path is enabled.
5354 * 1. Regular filters.
5355 * 2. Server filters: these are special filters used to redirect
5356 * SYN packets to the offload queue.
5357 */
5358 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5359 adap->tids.sftid_base = adap->tids.ftid_base +
5360 DIV_ROUND_UP(adap->tids.nftids, 3);
5361 adap->tids.nsftids = adap->tids.nftids -
5362 DIV_ROUND_UP(adap->tids.nftids, 3);
5363 adap->tids.nftids = adap->tids.sftid_base -
5364 adap->tids.ftid_base;
5365 }
5366 adap->vres.ddp.start = val[3];
5367 adap->vres.ddp.size = val[4] - val[3] + 1;
5368 adap->params.ofldq_wr_cred = val[5];
5369
5370 adap->params.offload = 1;
5371 }
5372 if (caps_cmd.rdmacaps) {
5373 params[0] = FW_PARAM_PFVF(STAG_START);
5374 params[1] = FW_PARAM_PFVF(STAG_END);
5375 params[2] = FW_PARAM_PFVF(RQ_START);
5376 params[3] = FW_PARAM_PFVF(RQ_END);
5377 params[4] = FW_PARAM_PFVF(PBL_START);
5378 params[5] = FW_PARAM_PFVF(PBL_END);
5379 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5380 params, val);
5381 if (ret < 0)
5382 goto bye;
5383 adap->vres.stag.start = val[0];
5384 adap->vres.stag.size = val[1] - val[0] + 1;
5385 adap->vres.rq.start = val[2];
5386 adap->vres.rq.size = val[3] - val[2] + 1;
5387 adap->vres.pbl.start = val[4];
5388 adap->vres.pbl.size = val[5] - val[4] + 1;
5389
5390 params[0] = FW_PARAM_PFVF(SQRQ_START);
5391 params[1] = FW_PARAM_PFVF(SQRQ_END);
5392 params[2] = FW_PARAM_PFVF(CQ_START);
5393 params[3] = FW_PARAM_PFVF(CQ_END);
5394 params[4] = FW_PARAM_PFVF(OCQ_START);
5395 params[5] = FW_PARAM_PFVF(OCQ_END);
5396 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5397 val);
5398 if (ret < 0)
5399 goto bye;
5400 adap->vres.qp.start = val[0];
5401 adap->vres.qp.size = val[1] - val[0] + 1;
5402 adap->vres.cq.start = val[2];
5403 adap->vres.cq.size = val[3] - val[2] + 1;
5404 adap->vres.ocq.start = val[4];
5405 adap->vres.ocq.size = val[5] - val[4] + 1;
5406
5407 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5408 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5409 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5410 val);
5411 if (ret < 0) {
5412 adap->params.max_ordird_qp = 8;
5413 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5414 ret = 0;
5415 } else {
5416 adap->params.max_ordird_qp = val[0];
5417 adap->params.max_ird_adapter = val[1];
5418 }
5419 dev_info(adap->pdev_dev,
5420 "max_ordird_qp %d max_ird_adapter %d\n",
5421 adap->params.max_ordird_qp,
5422 adap->params.max_ird_adapter);
5423 }
5424 if (caps_cmd.iscsicaps) {
5425 params[0] = FW_PARAM_PFVF(ISCSI_START);
5426 params[1] = FW_PARAM_PFVF(ISCSI_END);
5427 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5428 params, val);
5429 if (ret < 0)
5430 goto bye;
5431 adap->vres.iscsi.start = val[0];
5432 adap->vres.iscsi.size = val[1] - val[0] + 1;
5433 }
5434 #undef FW_PARAM_PFVF
5435 #undef FW_PARAM_DEV
5436
5437 /* The MTU/MSS Table is initialized by now, so load its values. If
5438 * we're initializing the adapter, then we'll make any modifications
5439 * we want to the MTU/MSS Table and also initialize the congestion
5440 * parameters.
5441 */
5442 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5443 if (state != DEV_STATE_INIT) {
5444 int i;
5445
5446 /* The default MTU Table contains values 1492 and 1500.
5447 * However, for TCP, it's better to have two values which are
5448 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5449 * This allows us to have a TCP Data Payload which is a
5450 * multiple of 8 regardless of what combination of TCP Options
5451 * are in use (always a multiple of 4 bytes) which is
5452 * important for performance reasons. For instance, if no
5453 * options are in use, then we have a 20-byte IP header and a
5454 * 20-byte TCP header. In this case, a 1500-byte MSS would
5455 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5456 * which is not a multiple of 8. So using an MSS of 1488 in
5457 * this case results in a TCP Data Payload of 1448 bytes which
5458 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5459 * Stamps have been negotiated, then an MTU of 1500 bytes
5460 * results in a TCP Data Payload of 1448 bytes which, as
5461 * above, is a multiple of 8 bytes ...
5462 */
5463 for (i = 0; i < NMTUS; i++)
5464 if (adap->params.mtus[i] == 1492) {
5465 adap->params.mtus[i] = 1488;
5466 break;
5467 }
5468
5469 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5470 adap->params.b_wnd);
5471 }
5472 t4_init_sge_params(adap);
5473 t4_init_tp_params(adap);
5474 adap->flags |= FW_OK;
5475 return 0;
5476
5477 /*
5478 * Something bad happened. If a command timed out or failed with EIO,
5479 * the FW is not operating within its spec or something catastrophic
5480 * happened to the HW/FW; in either case, stop issuing commands.
5481 */
5482 bye:
5483 if (ret != -ETIMEDOUT && ret != -EIO)
5484 t4_fw_bye(adap, adap->mbox);
5485 return ret;
5486 }
5487
5488 /* EEH callbacks */
5489
5490 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5491 pci_channel_state_t state)
5492 {
5493 int i;
5494 struct adapter *adap = pci_get_drvdata(pdev);
5495
5496 if (!adap)
5497 goto out;
5498
5499 rtnl_lock();
5500 adap->flags &= ~FW_OK;
5501 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5502 spin_lock(&adap->stats_lock);
5503 for_each_port(adap, i) {
5504 struct net_device *dev = adap->port[i];
5505
5506 netif_device_detach(dev);
5507 netif_carrier_off(dev);
5508 }
5509 spin_unlock(&adap->stats_lock);
5510 if (adap->flags & FULL_INIT_DONE)
5511 cxgb_down(adap);
5512 rtnl_unlock();
5513 if ((adap->flags & DEV_ENABLED)) {
5514 pci_disable_device(pdev);
5515 adap->flags &= ~DEV_ENABLED;
5516 }
5517 out: return state == pci_channel_io_perm_failure ?
5518 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5519 }
5520
5521 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5522 {
5523 int i, ret;
5524 struct fw_caps_config_cmd c;
5525 struct adapter *adap = pci_get_drvdata(pdev);
5526
5527 if (!adap) {
5528 pci_restore_state(pdev);
5529 pci_save_state(pdev);
5530 return PCI_ERS_RESULT_RECOVERED;
5531 }
5532
5533 if (!(adap->flags & DEV_ENABLED)) {
5534 if (pci_enable_device(pdev)) {
5535 dev_err(&pdev->dev, "Cannot reenable PCI "
5536 "device after reset\n");
5537 return PCI_ERS_RESULT_DISCONNECT;
5538 }
5539 adap->flags |= DEV_ENABLED;
5540 }
5541
5542 pci_set_master(pdev);
5543 pci_restore_state(pdev);
5544 pci_save_state(pdev);
5545 pci_cleanup_aer_uncorrect_error_status(pdev);
5546
5547 if (t4_wait_dev_ready(adap->regs) < 0)
5548 return PCI_ERS_RESULT_DISCONNECT;
5549 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
5550 return PCI_ERS_RESULT_DISCONNECT;
5551 adap->flags |= FW_OK;
5552 if (adap_init1(adap, &c))
5553 return PCI_ERS_RESULT_DISCONNECT;
5554
5555 for_each_port(adap, i) {
5556 struct port_info *p = adap2pinfo(adap, i);
5557
5558 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5559 NULL, NULL);
5560 if (ret < 0)
5561 return PCI_ERS_RESULT_DISCONNECT;
5562 p->viid = ret;
5563 p->xact_addr_filt = -1;
5564 }
5565
5566 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5567 adap->params.b_wnd);
5568 setup_memwin(adap);
5569 if (cxgb_up(adap))
5570 return PCI_ERS_RESULT_DISCONNECT;
5571 return PCI_ERS_RESULT_RECOVERED;
5572 }
5573
5574 static void eeh_resume(struct pci_dev *pdev)
5575 {
5576 int i;
5577 struct adapter *adap = pci_get_drvdata(pdev);
5578
5579 if (!adap)
5580 return;
5581
5582 rtnl_lock();
5583 for_each_port(adap, i) {
5584 struct net_device *dev = adap->port[i];
5585
5586 if (netif_running(dev)) {
5587 link_start(dev);
5588 cxgb_set_rxmode(dev);
5589 }
5590 netif_device_attach(dev);
5591 }
5592 rtnl_unlock();
5593 }
5594
5595 static const struct pci_error_handlers cxgb4_eeh = {
5596 .error_detected = eeh_err_detected,
5597 .slot_reset = eeh_slot_reset,
5598 .resume = eeh_resume,
5599 };
5600
5601 static inline bool is_x_10g_port(const struct link_config *lc)
5602 {
5603 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
5604 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
5605 }
5606
5607 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5608 unsigned int us, unsigned int cnt,
5609 unsigned int size, unsigned int iqe_size)
5610 {
5611 q->adap = adap;
5612 set_rspq_intr_params(q, us, cnt);
5613 q->iqe_len = iqe_size;
5614 q->size = size;
5615 }
5616
5617 /*
5618 * Perform default configuration of DMA queues depending on the number and type
5619 * of ports we found and the number of available CPUs. Most settings can be
5620 * modified by the admin prior to actual use.
5621 */
5622 static void cfg_queues(struct adapter *adap)
5623 {
5624 struct sge *s = &adap->sge;
5625 int i, n10g = 0, qidx = 0;
5626 #ifndef CONFIG_CHELSIO_T4_DCB
5627 int q10g = 0;
5628 #endif
5629 int ciq_size;
5630
5631 for_each_port(adap, i)
5632 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5633 #ifdef CONFIG_CHELSIO_T4_DCB
5634 /* For Data Center Bridging support we need to be able to support up
5635 * to 8 Traffic Priorities; each of which will be assigned to its
5636 * own TX Queue in order to prevent Head-Of-Line Blocking.
5637 */
5638 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
5639 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
5640 MAX_ETH_QSETS, adap->params.nports * 8);
5641 BUG_ON(1);
5642 }
5643
5644 for_each_port(adap, i) {
5645 struct port_info *pi = adap2pinfo(adap, i);
5646
5647 pi->first_qset = qidx;
5648 pi->nqsets = 8;
5649 qidx += pi->nqsets;
5650 }
5651 #else /* !CONFIG_CHELSIO_T4_DCB */
5652 /*
5653 * We default to 1 queue per non-10G port and up to as many queues as
5654 * there are cores per 10G port.
5655 */
5656 if (n10g)
5657 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5658 if (q10g > netif_get_num_default_rss_queues())
5659 q10g = netif_get_num_default_rss_queues();
5660
5661 for_each_port(adap, i) {
5662 struct port_info *pi = adap2pinfo(adap, i);
5663
5664 pi->first_qset = qidx;
5665 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
5666 qidx += pi->nqsets;
5667 }
5668 #endif /* !CONFIG_CHELSIO_T4_DCB */
5669
5670 s->ethqsets = qidx;
5671 s->max_ethqsets = qidx; /* MSI-X may lower it later */
5672
5673 if (is_offload(adap)) {
5674 /*
5675 		 * For offload we use one queue per channel if all ports are at
5676 		 * most 1G; otherwise we take up to one queue per online CPU and
5677 		 * round that up to a multiple of the number of channels.
5678 */
5679 if (n10g) {
5680 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5681 num_online_cpus());
5682 s->ofldqsets = roundup(i, adap->params.nports);
5683 } else
5684 s->ofldqsets = adap->params.nports;
5685 /* For RDMA one Rx queue per channel suffices */
5686 s->rdmaqs = adap->params.nports;
5687 s->rdmaciqs = adap->params.nports;
5688 }
5689
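	/* Ethernet RX response queues: 5 us interrupt holdoff, 10-packet
	 * threshold, 1024 entries of 64 bytes each; free lists start at 72
	 * entries.
	 */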
5690 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5691 struct sge_eth_rxq *r = &s->ethrxq[i];
5692
5693 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5694 r->fl.size = 72;
5695 }
5696
5697 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5698 s->ethtxq[i].q.size = 1024;
5699
5700 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5701 s->ctrlq[i].q.size = 512;
5702
5703 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5704 s->ofldtxq[i].q.size = 1024;
5705
5706 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5707 struct sge_ofld_rxq *r = &s->ofldrxq[i];
5708
5709 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
5710 r->rspq.uld = CXGB4_ULD_ISCSI;
5711 r->fl.size = 72;
5712 }
5713
5714 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5715 struct sge_ofld_rxq *r = &s->rdmarxq[i];
5716
5717 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
5718 r->rspq.uld = CXGB4_ULD_RDMA;
5719 r->fl.size = 72;
5720 }
5721
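	/* Size the RDMA concentrator IQs to cover every offload CQ plus the
	 * filter TIDs (with a little slack), capped at the SGE's maximum IQ
	 * size.
	 */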
5722 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
5723 if (ciq_size > SGE_MAX_IQ_SIZE) {
5724 CH_WARN(adap, "CIQ size too small for available IQs\n");
5725 ciq_size = SGE_MAX_IQ_SIZE;
5726 }
5727
5728 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
5729 struct sge_ofld_rxq *r = &s->rdmaciq[i];
5730
5731 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
5732 r->rspq.uld = CXGB4_ULD_RDMA;
5733 }
5734
5735 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5736 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
5737 }
5738
5739 /*
5740 * Reduce the number of Ethernet queues across all ports to at most n.
5741  * n is assumed to be large enough to leave at least one queue per port.
5742 */
5743 static void reduce_ethqs(struct adapter *adap, int n)
5744 {
5745 int i;
5746 struct port_info *pi;
5747
5748 while (n < adap->sge.ethqsets)
5749 for_each_port(adap, i) {
5750 pi = adap2pinfo(adap, i);
5751 if (pi->nqsets > 1) {
5752 pi->nqsets--;
5753 adap->sge.ethqsets--;
5754 if (adap->sge.ethqsets <= n)
5755 break;
5756 }
5757 }
5758
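	/* Renumber each port's first queue set index now that the per-port
	 * queue counts may have changed.
	 */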
5759 n = 0;
5760 for_each_port(adap, i) {
5761 pi = adap2pinfo(adap, i);
5762 pi->first_qset = n;
5763 n += pi->nqsets;
5764 }
5765 }
5766
5767 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5768 #define EXTRA_VECS 2
5769
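/*
 * Request one MSI-X vector per Ethernet, offload and RDMA queue plus
 * EXTRA_VECS, accept anything down to the minimum we can work with, and
 * then shrink the queue counts to fit what was actually granted.
 */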
5770 static int enable_msix(struct adapter *adap)
5771 {
5772 int ofld_need = 0;
5773 int i, want, need;
5774 struct sge *s = &adap->sge;
5775 unsigned int nchan = adap->params.nports;
5776 struct msix_entry entries[MAX_INGQ + 1];
5777
5778 for (i = 0; i < ARRAY_SIZE(entries); ++i)
5779 entries[i].entry = i;
5780
5781 want = s->max_ethqsets + EXTRA_VECS;
5782 if (is_offload(adap)) {
5783 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
5784 /* need nchan for each possible ULD */
5785 ofld_need = 3 * nchan;
5786 }
5787 #ifdef CONFIG_CHELSIO_T4_DCB
5788 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
5789 * each port.
5790 */
5791 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
5792 #else
5793 need = adap->params.nports + EXTRA_VECS + ofld_need;
5794 #endif
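	/* pci_enable_msix_range() returns the number of vectors granted
	 * (somewhere between 'need' and 'want') or a negative errno.
	 */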
5795 want = pci_enable_msix_range(adap->pdev, entries, need, want);
5796 if (want < 0)
5797 return want;
5798
5799 /*
5800 * Distribute available vectors to the various queue groups.
5801 * Every group gets its minimum requirement and NIC gets top
5802 * priority for leftovers.
5803 */
5804 i = want - EXTRA_VECS - ofld_need;
5805 if (i < s->max_ethqsets) {
5806 s->max_ethqsets = i;
5807 if (i < s->ethqsets)
5808 reduce_ethqs(adap, i);
5809 }
5810 if (is_offload(adap)) {
5811 i = want - EXTRA_VECS - s->max_ethqsets;
5812 i -= ofld_need - nchan;
5813 s->ofldqsets = (i / nchan) * nchan; /* round down */
5814 }
5815 for (i = 0; i < want; ++i)
5816 adap->msix_info[i].vec = entries[i].vector;
5817
5818 return 0;
5819 }
5820
5821 #undef EXTRA_VECS
5822
5823 static int init_rss(struct adapter *adap)
5824 {
5825 unsigned int i, j;
5826
5827 for_each_port(adap, i) {
5828 struct port_info *pi = adap2pinfo(adap, i);
5829
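		/* Allocate this port's RSS indirection table and spread it
		 * round-robin across the port's queue sets.
		 */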
5830 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
5831 if (!pi->rss)
5832 return -ENOMEM;
5833 for (j = 0; j < pi->rss_size; j++)
5834 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
5835 }
5836 return 0;
5837 }
5838
5839 static void print_port_info(const struct net_device *dev)
5840 {
5841 char buf[80];
5842 char *bufp = buf;
5843 const char *spd = "";
5844 const struct port_info *pi = netdev_priv(dev);
5845 const struct adapter *adap = pi->adapter;
5846
5847 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
5848 spd = " 2.5 GT/s";
5849 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
5850 spd = " 5 GT/s";
5851 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
5852 spd = " 8 GT/s";
5853
5854 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
5855 bufp += sprintf(bufp, "100/");
5856 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
5857 bufp += sprintf(bufp, "1000/");
5858 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
5859 bufp += sprintf(bufp, "10G/");
5860 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
5861 bufp += sprintf(bufp, "40G/");
5862 if (bufp != buf)
5863 --bufp;
5864 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
5865
5866 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
5867 adap->params.vpd.id,
5868 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
5869 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
5870 (adap->flags & USING_MSIX) ? " MSI-X" :
5871 (adap->flags & USING_MSI) ? " MSI" : "");
5872 netdev_info(dev, "S/N: %s, P/N: %s\n",
5873 adap->params.vpd.sn, adap->params.vpd.pn);
5874 }
5875
5876 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
5877 {
5878 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
5879 }
5880
5881 /*
5882 * Free the following resources:
5883 * - memory used for tables
5884 * - MSI/MSI-X
5885 * - net devices
5886 * - resources FW is holding for us
5887 */
5888 static void free_some_resources(struct adapter *adapter)
5889 {
5890 unsigned int i;
5891
5892 t4_free_mem(adapter->l2t);
5893 t4_free_mem(adapter->tids.tid_tab);
5894 disable_msi(adapter);
5895
5896 for_each_port(adapter, i)
5897 if (adapter->port[i]) {
5898 kfree(adap2pinfo(adapter, i)->rss);
5899 free_netdev(adapter->port[i]);
5900 }
5901 if (adapter->flags & FW_OK)
5902 t4_fw_bye(adapter, adapter->fn);
5903 }
5904
5905 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
5906 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
5907 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5908 #define SEGMENT_SIZE 128
5909
5910 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5911 {
5912 int func, i, err, s_qpp, qpp, num_seg;
5913 struct port_info *pi;
5914 bool highdma = false;
5915 struct adapter *adapter = NULL;
5916 void __iomem *regs;
5917
5918 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
5919
5920 err = pci_request_regions(pdev, KBUILD_MODNAME);
5921 if (err) {
5922 /* Just info, some other driver may have claimed the device. */
5923 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
5924 return err;
5925 }
5926
5927 err = pci_enable_device(pdev);
5928 if (err) {
5929 dev_err(&pdev->dev, "cannot enable PCI device\n");
5930 goto out_release_regions;
5931 }
5932
5933 regs = pci_ioremap_bar(pdev, 0);
5934 if (!regs) {
5935 dev_err(&pdev->dev, "cannot map device registers\n");
5936 err = -ENOMEM;
5937 goto out_disable_device;
5938 }
5939
5940 err = t4_wait_dev_ready(regs);
5941 if (err < 0)
5942 goto out_unmap_bar0;
5943
5944 /* We control everything through one PF */
5945 func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
5946 if (func != ent->driver_data) {
5947 iounmap(regs);
5948 pci_disable_device(pdev);
5949 pci_save_state(pdev); /* to restore SR-IOV later */
5950 goto sriov;
5951 }
5952
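	/* Prefer 64-bit DMA addressing and fall back to a 32-bit mask if the
	 * platform can't provide it.
	 */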
5953 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
5954 highdma = true;
5955 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5956 if (err) {
5957 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
5958 "coherent allocations\n");
5959 goto out_unmap_bar0;
5960 }
5961 } else {
5962 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5963 if (err) {
5964 dev_err(&pdev->dev, "no usable DMA configuration\n");
5965 goto out_unmap_bar0;
5966 }
5967 }
5968
5969 pci_enable_pcie_error_reporting(pdev);
5970 enable_pcie_relaxed_ordering(pdev);
5971 pci_set_master(pdev);
5972 pci_save_state(pdev);
5973
5974 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
5975 if (!adapter) {
5976 err = -ENOMEM;
5977 goto out_unmap_bar0;
5978 }
5979
5980 adapter->workq = create_singlethread_workqueue("cxgb4");
5981 if (!adapter->workq) {
5982 err = -ENOMEM;
5983 goto out_free_adapter;
5984 }
5985
5986 /* PCI device has been enabled */
5987 adapter->flags |= DEV_ENABLED;
5988
5989 adapter->regs = regs;
5990 adapter->pdev = pdev;
5991 adapter->pdev_dev = &pdev->dev;
5992 adapter->mbox = func;
5993 adapter->fn = func;
5994 adapter->msg_enable = dflt_msg_enable;
5995 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
5996
5997 spin_lock_init(&adapter->stats_lock);
5998 spin_lock_init(&adapter->tid_release_lock);
5999 spin_lock_init(&adapter->win0_lock);
6000
6001 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6002 INIT_WORK(&adapter->db_full_task, process_db_full);
6003 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6004
6005 err = t4_prep_adapter(adapter);
6006 if (err)
6007 goto out_free_adapter;
6008
6009
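	/* On T5 and later chips BAR2 exposes per-queue SGE doorbell/GTS
	 * registers; it is mapped write-combined below so that egress
	 * doorbell writes can be coalesced.
	 */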
6010 if (!is_t4(adapter->params.chip)) {
6011 s_qpp = (QUEUESPERPAGEPF0_S +
6012 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6013 adapter->fn);
6014 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6015 SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6016 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6017
6018 		/* Each segment is 128 bytes.  Write coalescing is usable only
6019 		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register reports no
6020 		 * more queues per page than the number of segments that fit
6021 		 * in a page.
6022 		 */
6023 if (qpp > num_seg) {
6024 dev_err(&pdev->dev,
6025 "Incorrect number of egress queues per page\n");
6026 err = -EINVAL;
6027 goto out_free_adapter;
6028 }
6029 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6030 pci_resource_len(pdev, 2));
6031 if (!adapter->bar2) {
6032 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6033 err = -ENOMEM;
6034 goto out_free_adapter;
6035 }
6036 }
6037
6038 setup_memwin(adapter);
6039 err = adap_init0(adapter);
6040 setup_memwin_rdma(adapter);
6041 if (err)
6042 goto out_unmap_bar;
6043
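	/* Allocate a net_device for every port and set up its basic fields
	 * and feature flags; registration happens further down, once the
	 * queues and RSS tables have been configured.
	 */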
6044 for_each_port(adapter, i) {
6045 struct net_device *netdev;
6046
6047 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6048 MAX_ETH_QSETS);
6049 if (!netdev) {
6050 err = -ENOMEM;
6051 goto out_free_dev;
6052 }
6053
6054 SET_NETDEV_DEV(netdev, &pdev->dev);
6055
6056 adapter->port[i] = netdev;
6057 pi = netdev_priv(netdev);
6058 pi->adapter = adapter;
6059 pi->xact_addr_filt = -1;
6060 pi->port_id = i;
6061 netdev->irq = pdev->irq;
6062
6063 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6064 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6065 NETIF_F_RXCSUM | NETIF_F_RXHASH |
6066 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6067 if (highdma)
6068 netdev->hw_features |= NETIF_F_HIGHDMA;
6069 netdev->features |= netdev->hw_features;
6070 netdev->vlan_features = netdev->features & VLAN_FEAT;
6071
6072 netdev->priv_flags |= IFF_UNICAST_FLT;
6073
6074 netdev->netdev_ops = &cxgb4_netdev_ops;
6075 #ifdef CONFIG_CHELSIO_T4_DCB
6076 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6077 cxgb4_dcb_state_init(netdev);
6078 #endif
6079 netdev->ethtool_ops = &cxgb_ethtool_ops;
6080 }
6081
6082 pci_set_drvdata(pdev, adapter);
6083
6084 if (adapter->flags & FW_OK) {
6085 err = t4_port_init(adapter, func, func, 0);
6086 if (err)
6087 goto out_free_dev;
6088 }
6089
6090 /*
6091 * Configure queues and allocate tables now, they can be needed as
6092 * soon as the first register_netdev completes.
6093 */
6094 cfg_queues(adapter);
6095
6096 adapter->l2t = t4_init_l2t();
6097 if (!adapter->l2t) {
6098 /* We tolerate a lack of L2T, giving up some functionality */
6099 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6100 adapter->params.offload = 0;
6101 }
6102
6103 #if IS_ENABLED(CONFIG_IPV6)
6104 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6105 adapter->clipt_end);
6106 if (!adapter->clipt) {
6107 /* We tolerate a lack of clip_table, giving up
6108 * some functionality
6109 */
6110 dev_warn(&pdev->dev,
6111 "could not allocate Clip table, continuing\n");
6112 adapter->params.offload = 0;
6113 }
6114 #endif
6115 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6116 dev_warn(&pdev->dev, "could not allocate TID table, "
6117 "continuing\n");
6118 adapter->params.offload = 0;
6119 }
6120
6121 	/* See which interrupt mode we'll use: MSI-X (msi > 1), MSI (msi > 0), or legacy INTx */
6122 if (msi > 1 && enable_msix(adapter) == 0)
6123 adapter->flags |= USING_MSIX;
6124 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6125 adapter->flags |= USING_MSI;
6126
6127 err = init_rss(adapter);
6128 if (err)
6129 goto out_free_dev;
6130
6131 /*
6132 * The card is now ready to go. If any errors occur during device
6133 * registration we do not fail the whole card but rather proceed only
6134 * with the ports we manage to register successfully. However we must
6135 * register at least one net device.
6136 */
6137 for_each_port(adapter, i) {
6138 pi = adap2pinfo(adapter, i);
6139 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6140 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6141
6142 err = register_netdev(adapter->port[i]);
6143 if (err)
6144 break;
6145 adapter->chan_map[pi->tx_chan] = i;
6146 print_port_info(adapter->port[i]);
6147 }
6148 if (i == 0) {
6149 dev_err(&pdev->dev, "could not register any net devices\n");
6150 goto out_free_dev;
6151 }
6152 if (err) {
6153 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6154 err = 0;
6155 }
6156
6157 if (cxgb4_debugfs_root) {
6158 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6159 cxgb4_debugfs_root);
6160 setup_debugfs(adapter);
6161 }
6162
6163 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6164 pdev->needs_freset = 1;
6165
6166 if (is_offload(adapter))
6167 attach_ulds(adapter);
6168
6169 sriov:
6170 #ifdef CONFIG_PCI_IOV
6171 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6172 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6173 dev_info(&pdev->dev,
6174 "instantiated %u virtual functions\n",
6175 num_vf[func]);
6176 #endif
6177 return 0;
6178
6179 out_free_dev:
6180 free_some_resources(adapter);
6181 out_unmap_bar:
6182 if (!is_t4(adapter->params.chip))
6183 iounmap(adapter->bar2);
6184 out_free_adapter:
6185 if (adapter->workq)
6186 destroy_workqueue(adapter->workq);
6187
6188 kfree(adapter);
6189 out_unmap_bar0:
6190 iounmap(regs);
6191 out_disable_device:
6192 pci_disable_pcie_error_reporting(pdev);
6193 pci_disable_device(pdev);
6194 out_release_regions:
6195 pci_release_regions(pdev);
6196 return err;
6197 }
6198
6199 static void remove_one(struct pci_dev *pdev)
6200 {
6201 struct adapter *adapter = pci_get_drvdata(pdev);
6202
6203 #ifdef CONFIG_PCI_IOV
6204 pci_disable_sriov(pdev);
6205
6206 #endif
6207
6208 if (adapter) {
6209 int i;
6210
6211 /* Tear down per-adapter Work Queue first since it can contain
6212 * references to our adapter data structure.
6213 */
6214 destroy_workqueue(adapter->workq);
6215
6216 if (is_offload(adapter))
6217 detach_ulds(adapter);
6218
6219 for_each_port(adapter, i)
6220 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6221 unregister_netdev(adapter->port[i]);
6222
6223 debugfs_remove_recursive(adapter->debugfs_root);
6224
6225 /* If we allocated filters, free up state associated with any
6226 * valid filters ...
6227 */
6228 if (adapter->tids.ftid_tab) {
6229 struct filter_entry *f = &adapter->tids.ftid_tab[0];
6230 for (i = 0; i < (adapter->tids.nftids +
6231 adapter->tids.nsftids); i++, f++)
6232 if (f->valid)
6233 clear_filter(adapter, f);
6234 }
6235
6236 if (adapter->flags & FULL_INIT_DONE)
6237 cxgb_down(adapter);
6238
6239 free_some_resources(adapter);
6240 #if IS_ENABLED(CONFIG_IPV6)
6241 t4_cleanup_clip_tbl(adapter);
6242 #endif
6243 iounmap(adapter->regs);
6244 if (!is_t4(adapter->params.chip))
6245 iounmap(adapter->bar2);
6246 pci_disable_pcie_error_reporting(pdev);
6247 if ((adapter->flags & DEV_ENABLED)) {
6248 pci_disable_device(pdev);
6249 adapter->flags &= ~DEV_ENABLED;
6250 }
6251 pci_release_regions(pdev);
6252 synchronize_rcu();
6253 kfree(adapter);
6254 } else
6255 pci_release_regions(pdev);
6256 }
6257
6258 static struct pci_driver cxgb4_driver = {
6259 .name = KBUILD_MODNAME,
6260 .id_table = cxgb4_pci_tbl,
6261 .probe = init_one,
6262 .remove = remove_one,
6263 .shutdown = remove_one,
6264 .err_handler = &cxgb4_eeh,
6265 };
6266
6267 static int __init cxgb4_init_module(void)
6268 {
6269 int ret;
6270
6271 /* Debugfs support is optional, just warn if this fails */
6272 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6273 if (!cxgb4_debugfs_root)
6274 pr_warn("could not create debugfs entry, continuing\n");
6275
6276 ret = pci_register_driver(&cxgb4_driver);
6277 if (ret < 0)
6278 debugfs_remove(cxgb4_debugfs_root);
6279
6280 #if IS_ENABLED(CONFIG_IPV6)
6281 if (!inet6addr_registered) {
6282 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6283 inet6addr_registered = true;
6284 }
6285 #endif
6286
6287 return ret;
6288 }
6289
6290 static void __exit cxgb4_cleanup_module(void)
6291 {
6292 #if IS_ENABLED(CONFIG_IPV6)
6293 if (inet6addr_registered) {
6294 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6295 inet6addr_registered = false;
6296 }
6297 #endif
6298 pci_unregister_driver(&cxgb4_driver);
6299 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6300 }
6301
6302 module_init(cxgb4_init_module);
6303 module_exit(cxgb4_cleanup_module);