/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init,
		 "Forcibly become Master PF and initialize adapter, deprecated parameter");
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
#ifdef CONFIG_PCI_IOV
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif /* CONFIG_PCI_IOV */
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			cxgb4_dcb_state_init(dev);
			dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}

	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_iscsirxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
			 adap->port[0]->name, i);

	for_each_iscsitrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int iscsitqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsirxq(s, iscsiqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsirxq[iscsiqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsitrxq(s, iscsitqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsitrxq[iscsitqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--iscsitqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsitrxq[iscsitqidx].rspq);
	while (--iscsiqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsirxq[iscsiqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_iscsirxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsirxq[i].rspq);
	for_each_iscsitrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsitrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}
	}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler) {
			cxgb_busy_poll_init_lock(q);
			napi_enable(&q->napi);
		}
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}
static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler, 0);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, -1);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
	 * don't forget to update the following which need to be
	 * synchronized to any changes here.
	 *
	 * 1. The calculations of MAX_INGQ in cxgb4.h.
	 *
	 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
	 *    to accommodate any new/deleted Ingress Queues
	 *    which need MSI-X Vectors.
	 *
	 * 3. Update sge_qinfo_show() to include information on the
	 *    new/deleted queues.
	 */
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler, -1);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler,
					       t4_get_mps_bg_map(adap,
								 pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->iscsiqsets / adap->params.nports; /* iscsi queues per channel */
	for_each_iscsirxq(s, i) {
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
					    adap->port[i / j],
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
	if (err) \
		goto freeout; \
	if (msi_idx > 0) \
		msi_idx += nq; \
} while (0)

	ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq);
	ALLOC_OFLD_RXQS(s->iscsitrxq, s->niscsitq, j, s->iscsit_rxq);
	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);

#undef ALLOC_OFLD_RXQS

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
void t4_free_mem(void *addr)
{
	kvfree(addr);
}
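
/* Editor's usage sketch (not part of the original driver): because
 * t4_alloc_mem() may fall back to vzalloc() for large tables, callers must
 * free with t4_free_mem() (kvfree) rather than plain kfree().  "nentries"
 * below is a hypothetical table size.
 *
 *	struct filter_entry *tab;
 *
 *	tab = t4_alloc_mem(nentries * sizeof(*tab));
 *	if (!tab)
 *		return -ENOMEM;
 *	...
 *	t4_free_mem(tab);
 */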
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (f->l2t == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
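
/* Editor's illustration (hedged sketch, not part of the driver): a minimal
 * ch_filter_specification that set_filter_wr() could ship to the firmware,
 * dropping TCP segments addressed to local port 80.  "fidx" is assumed to
 * be a free, unlocked slot in ftid_tab; real callers also validate the
 * specification first.
 *
 *	struct ch_filter_specification fs = { 0 };
 *
 *	fs.action = FILTER_DROP;
 *	fs.val.proto = IPPROTO_TCP;
 *	fs.mask.proto = 0xff;
 *	fs.val.lport = 80;
 *	fs.mask.lport = 0xffff;
 *	adapter->tids.ftid_tab[fidx].fs = fs;
 *	err = set_filter_wr(adapter, fidx);
 */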
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If a Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/**
 *	cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
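
/* Editor's usage sketch (hedged): a caller might cap the interrupt rate on
 * an Rx queue by requesting a 50 usec holdoff or an 8-packet threshold,
 * whichever trips first:
 *
 *	err = cxgb4_set_rspq_intr_params(&rxq->rspq, 50, 8);
 *
 * Note that us == 0 && cnt == 0 is coerced to a packet count of 1 above,
 * i.e. an interrupt per message.
 */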
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
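
/* Editor's usage sketch (hedged): the usual ULD pairing of the two calls
 * above; "my_ctx" is a hypothetical per-connection cookie stashed in the
 * atid entry and later recovered with lookup_atid().
 *
 *	int atid = cxgb4_alloc_atid(&adap->tids, my_ctx);
 *
 *	if (atid < 0)
 *		return -ENOMEM;		table exhausted
 *	...
 *	cxgb4_free_atid(&adap->tids, atid);
 */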
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET)
			t->stids_in_use++;
		else
			t->stids_in_use += 2;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);
/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
				t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);
/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET)
			t->stids_in_use--;
		else
			t->stids_in_use -= 2;
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
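
/* Editor's note on the encoding above: tid_tab entries are pointer-sized
 * and pointer-aligned, so the low two bits of the singly-linked release
 * list are free to carry the Tx channel.  process_tid_release_list() below
 * strips the tag again:
 *
 *	chan = (uintptr_t)p & 3;	recover the channel
 *	p = (void *)p - chan;		recover the entry pointer
 */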
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		if (t->hash_base && (tid >= t->hash_base))
			atomic_dec(&t->hash_tids_in_use);
		else
			atomic_dec(&t->tids_in_use);
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
		__set_bit(0, t->stid_bmap);

	return 0;
}
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
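
/* Editor's usage sketch (hedged): a listening ULD binding an IPv4 server
 * on TCP port 8000, steering SYNs to the ingress queue "queue"; "stid" is
 * assumed to come from cxgb4_alloc_stid().
 *
 *	ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
 *				  htons(8000), 0, queue);
 *	if (ret < 0)
 *		...	ret is a -ve errno; otherwise a NET_XMIT_* value
 */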
/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);
int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
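
/* Worked example (editor's note): if the firmware-programmed MTU table were
 * {88, 256, 512, 576, 1500, 9000}, cxgb4_best_mtu(mtus, 1400, &idx) would
 * return 576 with idx == 3 (the largest entry not exceeding 1400), while a
 * target of 64 would return the smallest entry, 88.  The table contents
 * shown here are illustrative only.
 */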
/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
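
/* Editor's usage sketch (hedged): choosing an MTU whose payload is 8-byte
 * aligned given 40 bytes of IPv4 + TCP headers:
 *
 *	unsigned int mtu_idx;
 *	unsigned int mtu = cxgb4_best_aligned_mtu(mtus, 40, 1460, 8,
 *						  &mtu_idx);
 *
 * The alignment is only honored when the aligned candidate is within one
 * table slot of the plain best MTU, per the "not far" rule above.
 */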
/**
 *	cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 *	@chip: chip type
 *	@viid: VI id of the given port
 *
 *	Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);
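
/* Worked example (editor's note): on T4/T5 each SMT row holds two SMAC
 * entries, so VI 0x85 maps to index (0x85 & 0x7f) << 1 == 0x0a; on T6,
 * with one entry per row, the same VI maps directly to 0x05.
 */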
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
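/* Worked example, for illustration only: the delta computation above handles
 * producer-index wrap-around.  For a hypothetical queue of size 1024 whose
 * hardware pidx has stalled at 1000 while the host-side pidx is at 24:
 *
 *	delta = 1024 - 1000 + 24 = 48
 *
 * i.e. the doorbell replays the 48 descriptor notifications the hardware
 * missed.
 */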
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
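/* Worked example, for illustration only: each TPTE is 32 bytes and the
 * upper 24 bits of the stag select the entry, so for a hypothetical stag
 * of 0x12300 and a stag region starting at 0x4000:
 *
 *	offset = (0x12300 >> 8) * 32 + 0x4000 = 0x123 * 32 + 0x4000 = 0x6460
 *
 * That flat offset is then binned into EDC0/EDC1/MC0 (and MC1 on T5) by
 * the cumulative *_end boundaries computed from the MA BAR registers.
 */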
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 lo, hi;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
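/* Illustrative usage sketch, not part of the driver: measuring an interval
 * in SGE timestamp ticks.  "netdev" is a hypothetical port; note the low
 * and high halves are read as two separate register accesses, so a caller
 * that needs carry-safe values across the 32-bit boundary would have to
 * re-read and compare itself.
 *
 *	u64 t0 = cxgb4_read_sge_timestamp(netdev);
 *	// ... do work ...
 *	u64 ticks = cxgb4_read_sge_timestamp(netdev) - t0;
 */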
int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}
static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
				cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}
static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_iscsirxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;
	unsigned short i;

	lli.pdev = adap->pdev;
	lli.pf = adap->pf;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.ciq_ids = adap->sge.rdma_ciq;
		lli.nrxq = adap->sge.rdmaqs;
		lli.nciq = adap->sge.rdmaciqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.iscsi_rxq;
		lli.nrxq = adap->sge.iscsiqsets;
	} else if (uld == CXGB4_ULD_ISCSIT) {
		lli.rxq_ids = adap->sge.iscsit_rxq;
		lli.nrxq = adap->sge.niscsitq;
	}
	lli.ntxq = adap->sge.iscsiqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.chip;
	lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lli.udb_density = 1 << adap->params.sge.eq_qpp;
	lli.ucq_density = 1 << adap->params.sge.iq_qpp;
	lli.filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lli.tx_modq[i] = i;
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;
	lli.sge_ingpadboundary = adap->sge.fl_align;
	lli.sge_egrstatuspagesize = adap->sge.stat_len;
	lli.sge_pktshift = adap->sge.pktshift;
	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
	lli.max_ordird_qp = adap->params.max_ordird_qp;
	lli.max_ird_adapter = adap->params.max_ird_adapter;
	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lli.nodeid = dev_to_node(adap->pdev_dev);

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}
static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	spin_lock(&adap_rcu_lock);
	list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
	spin_unlock(&adap_rcu_lock);

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}
static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);

	spin_lock(&adap_rcu_lock);
	list_del_rcu(&adap->rcu_node);
	spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}
/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);
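/* Illustrative sketch, not part of the driver: the skeleton of a
 * hypothetical upper-layer driver ("my_uld") registering itself.  Only the
 * cxgb4_register_uld()/cxgb4_unregister_uld() calls are real API; the
 * callbacks and their bodies are placeholders the ULD author supplies.
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lli)
 *	{
 *		// allocate per-adapter state from lli (queues, tids, ...)
 *		return my_state;
 *	}
 *
 *	static int my_uld_rx(void *handle, const __be64 *rsp,
 *			     const struct pkt_gl *gl)
 *	{
 *		return 0;	// consume offloaded ingress messages
 *	}
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.name = "my_uld",
 *		.add = my_uld_add,
 *		.rx_handler = my_uld_rx,
 *	};
 *
 *	// in module init / exit:
 *	cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */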
/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (event_dev->priv_flags & IFF_802_1Q_VLAN)
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}
static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/*
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}
static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this like the filter being locked, currently
 * pending in another operation, etc.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport  = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		switch (pi->tstamp_config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			pi->rxtstamp = false;
			break;
		case HWTSTAMP_FILTER_ALL:
			pi->rxtstamp = true;
			break;
		default:
			pi->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
			return -ERANGE;
		}

		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll        = cxgb_busy_poll,
#endif
};
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}
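/* Worked example, for illustration only, of the byte-order helpers above on
 * hypothetical data: for bytes p[] = { 0x12, 0x34, 0x56 },
 *
 *	be16(p) = (0x12 << 8) | 0x34	 = 0x1234
 *	le16(p) = 0x12 | (0x34 << 8)	 = 0x3412
 *	le24(p) = le16(p) | (0x56 << 16) = 0x563412
 *
 * i.e. the image header fields are little-endian while the version word in
 * DRAM is big-endian.
 */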
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		1,
	},
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	ret = t4_check_fw_version(adap);
	/* If firmware is too old (not supported by driver) force an update. */
	if (ret)
		state = DEV_STATE_UNINIT;
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}
	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = t4_get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}
	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info. Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps
	 * ie starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif
	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}
	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region. Divide the available filter
		 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if workaround
		 * path is enabled.
		 * 1. For regular filters.
		 * 2. Server filter: These are special filters which are used
		 *    to redirect SYN packets to offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					 DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
						adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap);
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev,
				"Cannot reenable PCI device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}
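
/* Taken together, the three callbacks above follow the standard PCI error
 * recovery sequence: the PCI core invokes ->error_detected() when a channel
 * error is reported, ->slot_reset() after the slot/link has been reset, and
 * ->resume() once normal I/O may restart (see
 * Documentation/PCI/pci-error-recovery.txt).
 */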

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};

static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	cxgb4_set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */
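
	/* Worked example (hypothetical configuration): a 2-port adapter with
	 * one 10G port on an 8-core host, assuming MAX_ETH_QSETS == 32,
	 * gives q10g = (32 - 1) / 1 == 31, which is then capped at the
	 * default RSS queue count (typically 8); the 10G port therefore gets
	 * 8 queue sets and the 1G port gets 1.
	 */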

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;	/* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->iscsirxq),
				  num_online_cpus());
			s->iscsiqsets = roundup(i, adap->params.nports);
		} else {
			s->iscsiqsets = adap->params.nports;
		}
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		/* Try and allow at least 1 CIQ per cpu rounding down
		 * to the number of ports, with a minimum of 1 per port.
		 * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
		 * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
		 * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
		 */
		s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
		s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
			      adap->params.nports;
		s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);

		if (!is_t4(adap->params.chip))
			s->niscsitq = s->iscsiqsets;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->iscsirxq); i++) {
		struct sge_ofld_rxq *r = &s->iscsirxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	if (!is_t4(adap->params.chip)) {
		for (i = 0; i < ARRAY_SIZE(s->iscsitrxq); i++) {
			struct sge_ofld_rxq *r = &s->iscsitrxq[i];

			init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
			r->rspq.uld = CXGB4_ULD_ISCSIT;
			r->fl.size = 72;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
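
/* Worked example (hypothetical numbers): four ports with nqsets
 * {4, 4, 4, 4} (ethqsets == 16) reduced to n == 10 end up as {2, 2, 3, 3},
 * after which first_qset is reassigned cumulatively to 0, 2, 4 and 7.
 */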

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;

	entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
			  GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < MAX_INGQ + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->iscsiqsets +
			s->niscsitq;
		/* need nchan for each possible ULD */
		if (is_t4(adap->params.chip))
			ofld_need = 3 * nchan;
		else
			ofld_need = 4 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev,
			 "not enough MSI-X vectors left, not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		if (allocated < want) {
			s->rdmaqs = nchan;
			s->rdmaciqs = nchan;

			if (!is_t4(adap->params.chip))
				s->niscsitq = nchan;
		}

		/* leftovers go to OFLD */
		i = allocated - EXTRA_VECS - s->max_ethqsets -
		    s->rdmaqs - s->rdmaciqs - s->niscsitq;
		s->iscsiqsets = (i / nchan) * nchan;	/* round down */
	}
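
	/* Worked example (hypothetical numbers) for the two distribution
	 * steps above: a 2-port T5 (ofld_need == 4 * 2 == 8) granted
	 * allocated == 20 vectors keeps i = 20 - 2 - 8 == 10 for the NIC,
	 * so max_ethqsets drops from, say, 16 to 10; with rdmaqs, rdmaciqs
	 * and niscsitq each trimmed to 2, the offload leftovers are
	 * 20 - 2 - 10 - 2 - 2 - 2 == 2, giving iscsiqsets == 2.
	 */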
	for (i = 0; i < allocated; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d iscsi %d rdma cpl %d rdma ciq %d\n",
		 allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs,
		 s->rdmaciqs);

	kfree(entries);
	return 0;
}

static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}

static int cxgb4_get_pcie_dev_link_caps(struct adapter *adap,
					enum pci_bus_speed *speed,
					enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(adap->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}

	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
		return err1 ? err1 : err2 ? err2 : -EINVAL;
	return 0;
}

static void cxgb4_check_pcie_caps(struct adapter *adap)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	if (cxgb4_get_pcie_dev_link_caps(adap, &speed_cap, &width_cap)) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	if (pcie_get_minimum_link(adap->pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_warn(adap->pdev_dev,
			 "Unable to determine PCI Express bandwidth.\n");
		return;
	}

	dev_info(adap->pdev_dev, "PCIe link speed is %s, device supports %s\n",
		 PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	dev_info(adap->pdev_dev, "PCIe link width is x%d, device supports x%d\n",
		 width, width_cap);
	if (speed < speed_cap || width < width_cap)
		dev_info(adap->pdev_dev,
			 "A slot with more lanes and/or higher speed is "
			 "suggested for optimal performance.\n");
}

static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "",
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
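
/* Illustrative example of the resulting log line (hypothetical; the exact
 * strings come from the VPD and t4_get_port_type_description()):
 *	eth0: Chelsio T580-CR rev 1 40GBASE-QSFP RNIC MSI-X
 */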

static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
{
	u16 device_id;

	/* Retrieve adapter's device ID */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);

	switch (device_id >> 12) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		dev_err(&pdev->dev, "Device %d is not supported\n",
			device_id);
	}
	return -EINVAL;
}

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;
	void __iomem *regs;
	u32 whoami, pl_rev;
	enum chip_type chip;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_unmap_bar0;

	/* We control everything through one PF */
	whoami = readl(regs + PL_WHOAMI_A);
	pl_rev = REV_G(readl(regs + PL_REV_A));
	chip = get_chip_type(pdev, pl_rev);
	func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
		SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_unmap_bar0;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_unmap_bar0;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = regs;
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->pf = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B long.  Write coalescing is enabled
		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF value for this
		 * PF is no larger than the number of segments that fit in a
		 * page.
		 */
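		/* Worked example (typical values): with 4 KB pages,
		 * num_seg = 4096 / 128 == 32, so the check below rejects any
		 * configuration with more than 32 egress queues per page.
		 */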
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}
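
	/* Illustrative example (made-up VPD contents) for the loop above: an
	 * "na" field of "0007430A1B2C" yields the MAC address
	 * 00:07:43:0a:1b:2c, each byte assembled from two ASCII hex digits
	 * via hex2val().
	 */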

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif
	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;
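
	/* As the two tests above show, the "msi" module parameter selects
	 * the interrupt mode: a value above 1 requests MSI-X, exactly 1
	 * requests MSI, and 0 leaves the adapter on legacy INTx; each step
	 * falls through to the next if the requested mode cannot be enabled.
	 */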

	/* check for PCI Express bandwidth capabilities */
	cxgb4_check_pcie_caps(adapter);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		err = 0;
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_offload(adapter))
			detach_ulds(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);