net: vlan: rename NETIF_F_HW_VLAN_* feature flags to NETIF_F_HW_VLAN_CTAG_*
[deliverable/linux.git] / drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"

/*
 * Generic information about the driver.
 */
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"

/*
 * Module Parameters.
 * ==================
 */

/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
		 "default adapter ethtool message level bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI. This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX 2
#define MSI_MSI 1
#define MSI_DEFAULT MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");

/*
 * Fundamental constants.
 * ======================
 */

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,

	MIN_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES = 128,
	MIN_FL_ENTRIES = 16,

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queues
	 * indices are all in units of Egress Context Units bytes, and free
	 * list entries are 64-bit PCI DMA addresses. And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused. See sge.c for more details ...
	 */
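	/*
	 * Worked example (assuming the usual SGE_EQ_IDXSIZE of 64 bytes):
	 * FL_PER_EQ_UNIT == 64 / sizeof(__be64) == 8, so MIN_FL_RESID keeps
	 * eight Free List entries -- one full Egress Queue Unit -- unused so
	 * that a full list never has Producer Index == Consumer Index.
	 */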
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};

/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;

/*
 * OS "Callback" functions.
 * ========================
 */

/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
	struct net_device *dev = adapter->port[pidx];

	/*
	 * If the port is disabled or the current recorded "link up"
	 * status matches the new status, just return.
	 */
	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
		return;

	/*
	 * Tell the OS that the link status has changed and print a short
	 * informative message on the console about the event.
	 */
	if (link_ok) {
		const char *s;
		const char *fc;
		const struct port_info *pi = netdev_priv(dev);

		netif_carrier_on(dev);

		switch (pi->link_cfg.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;

		case SPEED_1000:
			s = "1000Mbps";
			break;

		case SPEED_100:
			s = "100Mbps";
			break;

		default:
			s = "unknown";
			break;
		}

		switch (pi->link_cfg.fc) {
		case PAUSE_RX:
			fc = "RX";
			break;

		case PAUSE_TX:
			fc = "TX";
			break;

		case PAUSE_RX|PAUSE_TX:
			fc = "RX/TX";
			break;

		default:
			fc = "no";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
	} else {
		netif_carrier_off(dev);
		netdev_info(dev, "link down\n");
	}
}

/*
 * Net device operations.
 * ======================
 */

/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly. Enable vlan accel.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
			      true);
	if (ret == 0) {
		ret = t4vf_change_mac(pi->adapter, pi->viid,
				      pi->xact_addr_filt, dev->dev_addr, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
	return ret;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
	int pidx;

	/*
	 * Firmware events.
	 */
	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
		 "%s-FWeventq", adapter->name);
	adapter->msix_info[MSIX_FW].desc[namelen] = 0;

	/*
	 * Ethernet queues.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
		}
	}
}

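/*
 * The MSI-X vector layout assumed by both the request and free paths
 * below: vector MSIX_FW carries the firmware event queue interrupt and
 * the vectors from MSIX_IQFLINT onwards map one-to-one onto the Ethernet
 * ingress queues in port/Queue Set order.
 */
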
/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
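	/* unwind in reverse, releasing only the IRQs we actually acquired */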
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}

/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}

/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts.
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC(0) |
		     SEINTARM(rspq->intr_params) |
		     INGRESSQID(rspq->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.
	 */
	if (adapter->flags & USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC(0) |
			     SEINTARM(s->intrq.intr_params) |
			     INGRESSQID(s->intrq.cntxt_id));

}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}

/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;
		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message. We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = cpl;
		unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID. None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}

/*
 * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues
 * to use and initializes them. We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector. The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ... This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets. These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
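	/*
	 * (The SGE hands out consecutive Absolute Queue IDs, so the gap
	 * between a queue's Absolute ID and its VF-relative Context ID is a
	 * constant base which we can recover from queue 0 of each type.)
	 */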
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists. This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}

/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues. We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface). We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

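		/*
		 * Fill this VI's slice of the RSS table with our Response
		 * Queue IDs (t4vf_config_rss_range() repeats the nqsets IDs
		 * as needed to cover all rss_size slots, spreading the hash
		 * results across our queues).
		 */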
		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed. We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;
				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}

/*
 * Bring the adapter up. Called whenever we go from no "ports" open to having
 * one open. This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts. Must be called with the rtnl lock held. (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup. Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & USING_MSIX)
			name_msix_vecs(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources. We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);
	return 0;
}

/*
 * Bring the adapter down. Called whenever the last "port" (Virtual
 * Interface) closed. (Note that this routine is called "cxgb_down" in the PF
 * Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}

/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/*
	 * Note that this interface is up and start everything up ...
	 */
	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		goto err_unwind;
	err = link_start(dev);
	if (err)
		goto err_unwind;

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}

/*
 * Shut down a net device. This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	t4vf_enable_vi(adapter, pi->viid, false, false);
	pi->link_cfg.link_ok = 0;

	clear_bit(pi->port_id, &adapter->open_device_map);
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return 0;
}

/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}

/*
 * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
 * at a specified offset within the list, into an array of address pointers
 * and return the number collected.
 */
static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
							const u8 **addr,
							unsigned int offset,
							unsigned int maxaddrs)
{
	unsigned int index = 0;
	unsigned int naddr = 0;
	const struct netdev_hw_addr *ha;

	for_each_dev_addr(dev, ha)
		if (index++ >= offset) {
			addr[naddr++] = ha->addr;
			if (naddr >= maxaddrs)
				break;
		}
	return naddr;
}

/*
 * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
 * at a specified offset within the list, into an array of address pointers
 * and return the number collected.
 */
static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
							const u8 **addr,
							unsigned int offset,
							unsigned int maxaddrs)
{
	unsigned int index = 0;
	unsigned int naddr = 0;
	const struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		if (index++ >= offset) {
			addr[naddr++] = ha->addr;
			if (naddr >= maxaddrs)
				break;
		}
	return naddr;
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	unsigned int offset, naddr;
	const u8 *addr[7];
	int ret;
	const struct port_info *pi = netdev_priv(dev);

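	/*
	 * addr[] is sized to the number of exact-match MAC entries that one
	 * t4vf_alloc_mac_filt() firmware request can carry, so the loops
	 * below feed the (potentially longer) netdev address lists to the
	 * hardware in windows of up to ARRAY_SIZE(addr) entries.
	 */
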
	/* first do the secondary unicast addresses */
	for (offset = 0; ; offset += naddr) {
		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
						     ARRAY_SIZE(addr));
		if (naddr == 0)
			break;

		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
					  naddr, addr, NULL, &uhash, sleep);
		if (ret < 0)
			return ret;

		free = false;
	}

	/* next set up the multicast addresses */
	for (offset = 0; ; offset += naddr) {
		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
						     ARRAY_SIZE(addr));
		if (naddr == 0)
			break;

		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
					  naddr, addr, NULL, &mhash, sleep);
		if (ret < 0)
			return ret;
		free = false;
	}

	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
				  uhash | mhash, sleep);
}

/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4vf_set_rxmode(pi->adapter, pi->viid, mtu,
				      (dev->flags & IFF_PROMISC) != 0,
				      (dev->flags & IFF_ALLMULTI) != 0,
				      1, -1, sleep_ok);
	return ret;
}

/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
	int i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		int delta = us - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			timer_idx = i;
		}
	}
	return timer_idx;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			pktcnt_idx = i;
		}
	}
	return pktcnt_idx;
}

/*
 * Return a queue's interrupt hold-off time in us. 0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
			       const struct sge_rspq *rspq)
{
	unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);

	return timer_idx < SGE_NTIMERS
		? adapter->sge.timer_val[timer_idx]
		: 0;
}

/**
 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
 * @adapter: the adapter
 * @rspq: the RX response queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an RX response queue's interrupt hold-off time and packet count.
 * At least one of the two needs to be enabled for the queue to generate
 * interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that. If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
			     (cnt > 0 ? QINTR_CNT_EN : 0));
	return 0;
}

/*
 * Return a version number to identify the type of adapter. The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version 4, revision 0x3f (cxgb4vf).
	 */
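	/* (e.g. on a T4 VF, CHELSIO_CHIP_VERSION() is 4, giving 0xfc04) */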
	return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10);
}

/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret = 0;

	switch (cmd) {
	    /*
	     * The VF Driver doesn't have access to any of the other
	     * common Ethernet device ioctl()'s (like reading/writing
	     * PHY registers, etc.)
	     */

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/* accommodate SACK */
	if (new_mtu < 81)
		return -EINVAL;

	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
			      -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb4vf_set_features(struct net_device *dev,
				netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
				features & NETIF_F_HW_VLAN_CTAG_TX, 0);

	return 0;
}

/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
	int ret;
	struct sockaddr *addr = _addr;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
			      addr->sa_data, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues. This is called outside of normal interrupt
 * context.
 */
static void cxgb4vf_poll_controller(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->flags & USING_MSIX) {
		struct sge_eth_rxq *rxq;
		int nqsets;

		rxq = &adapter->sge.ethrxq[pi->first_qset];
		for (nqsets = pi->nqsets; nqsets; nqsets--) {
			t4vf_sge_intr_msix(0, &rxq->rspq);
			rxq++;
		}
	} else
		t4vf_intr_handler(adapter)(0, adapter);
}
#endif

/*
 * Ethtool operations.
 * ===================
 *
 * Note that we don't support any ethtool operations which change the physical
 * state of the port to which we're linked.
 */

/*
 * Return current port link settings.
 */
static int cxgb4vf_get_settings(struct net_device *dev,
				struct ethtool_cmd *cmd)
{
	const struct port_info *pi = netdev_priv(dev);

	cmd->supported = pi->link_cfg.supported;
	cmd->advertising = pi->link_cfg.advertising;
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
	cmd->duplex = DUPLEX_FULL;

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = pi->port_id;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = pi->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

/*
 * Return our driver information.
 */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
		sizeof(drvinfo->bus_info));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
}

/*
 * Return current adapter message level.
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}

/*
 * Return the device's current Queue Set ring size parameters along with the
 * allowed maximum values. Since ethtool doesn't understand the concept of
 * multi-queue devices, we just return the current values associated with the
 * first Queue Set.
 */
static void cxgb4vf_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	rp->rx_max_pending = MAX_RX_BUFFERS;
	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	rp->rx_jumbo_max_pending = 0;
	rp->tx_max_pending = MAX_TXQ_ENTRIES;

	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	rp->rx_jumbo_pending = 0;
	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

/*
 * Set the Queue Set ring size parameters for the device. Again, since
 * ethtool doesn't allow for the concept of multiple queues per device, we'll
 * apply these new values across all of the Queue Sets associated with the
 * device -- after vetting them of course!
 */
static int cxgb4vf_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	int qs;

	if (rp->rx_pending > MAX_RX_BUFFERS ||
	    rp->rx_jumbo_pending ||
	    rp->tx_pending > MAX_TXQ_ENTRIES ||
	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    rp->rx_pending < MIN_FL_ENTRIES ||
	    rp->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
		s->ethtxq[qs].q.size = rp->tx_pending;
	}
	return 0;
}

/*
 * Return the interrupt holdoff timer and count for the first Queue Set on the
 * device. Our extension ioctl() (the cxgbtool interface) allows the
 * interrupt holdoff timer to be read on all of the device's Queue Sets.
 */
static int cxgb4vf_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adapter = pi->adapter;
	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;

	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
	coalesce->rx_max_coalesced_frames =
		((rspq->intr_params & QINTR_CNT_EN)
		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
		 : 0);
	return 0;
}

/*
 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
 * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
 * the interrupt holdoff timer on any of the device's Queue Sets.
 */
static int cxgb4vf_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return set_rxq_intr_params(adapter,
				   &adapter->sge.ethrxq[pi->first_qset].rspq,
				   coalesce->rx_coalesce_usecs,
				   coalesce->rx_max_coalesced_frames);
}

/*
 * Report current port link pause parameter settings.
 */
static void cxgb4vf_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pauseparam)
{
	struct port_info *pi = netdev_priv(dev);

	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
}

/*
 * Identify the port by blinking the port's LED.
 */
static int cxgb4vf_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct port_info *pi = netdev_priv(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4vf_identify_port(pi->adapter, pi->viid, val);
}

/*
 * Port stats maintained per queue of the port.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 lro_pkts;
	u64 lro_merged;
};

/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes ",
	"TxBroadcastFrames ",
	"TxMulticastBytes ",
	"TxMulticastFrames ",
	"TxUnicastBytes ",
	"TxUnicastFrames ",
	"TxDroppedFrames ",
	"TxOffloadBytes ",
	"TxOffloadFrames ",
	"RxBroadcastBytes ",
	"RxBroadcastFrames ",
	"RxMulticastBytes ",
	"RxMulticastFrames ",
	"RxUnicastBytes ",
	"RxUnicastFrames ",
	"RxErrorFrames ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"VLANextractions ",
	"VLANinsertions ",
	"GROPackets ",
	"GROMerged ",
};

/*
 * Return the number of statistics in the specified statistics set.
 */
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
	/*NOTREACHED*/
}

/*
 * Return the strings for the specified statistics set.
 */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, stats_strings, sizeof(stats_strings));
		break;
	}
}

/*
 * Small utility routine to accumulate queue statistics across the queues of
 * a "port".
 */
static void collect_sge_port_stats(const struct adapter *adapter,
				   const struct port_info *pi,
				   struct queue_port_stats *stats)
{
	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
	int qs;

	memset(stats, 0, sizeof(*stats));
	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
		stats->tso += txq->tso;
		stats->tx_csum += txq->tx_cso;
		stats->rx_csum += rxq->stats.rx_cso;
		stats->vlan_ex += rxq->stats.vlan_ex;
		stats->vlan_ins += txq->vlan_ins;
		stats->lro_pkts += rxq->stats.lro_pkts;
		stats->lro_merged += rxq->stats.lro_merged;
	}
}

/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	int err = t4vf_get_port_stats(adapter, pi->pidx,
				      (struct t4vf_port_stats *)data);
	if (err)
		memset(data, 0, sizeof(struct t4vf_port_stats));

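	/*
	 * The stats buffer is laid out to match stats_strings[] above: the
	 * raw t4vf_port_stats structure comes first, followed by the
	 * accumulated per-queue stats, so step past the port stats here.
	 */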
	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}

/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
			   unsigned int start, unsigned int end)
{
	u32 *bp = regbuf + start - T4VF_REGMAP_START;

	for ( ; start <= end; start += sizeof(u32)) {
		/*
		 * Avoid reading the Mailbox Control register since that
		 * can trigger a Mailbox Ownership Arbitration cycle and
		 * interfere with communication with the firmware.
		 */
		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
			*bp++ = 0xffff;
		else
			*bp++ = t4_read_reg(adapter, start);
	}
}

/*
 * Copy our entire register map into the provided buffer.
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}

/*
 * Report current Wake On LAN settings.
 */
static void cxgb4vf_get_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/*
 * TCP Segmentation Offload flags which we support.
 */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static const struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_settings = cxgb4vf_get_settings,
	.get_drvinfo = cxgb4vf_get_drvinfo,
	.get_msglevel = cxgb4vf_get_msglevel,
	.set_msglevel = cxgb4vf_set_msglevel,
	.get_ringparam = cxgb4vf_get_ringparam,
	.set_ringparam = cxgb4vf_set_ringparam,
	.get_coalesce = cxgb4vf_get_coalesce,
	.set_coalesce = cxgb4vf_set_coalesce,
	.get_pauseparam = cxgb4vf_get_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = cxgb4vf_get_strings,
	.set_phys_id = cxgb4vf_phys_id,
	.get_sset_count = cxgb4vf_get_sset_count,
	.get_ethtool_stats = cxgb4vf_get_ethtool_stats,
	.get_regs_len = cxgb4vf_get_regs_len,
	.get_regs = cxgb4vf_get_regs,
	.get_wol = cxgb4vf_get_wol,
};

/*
 * /sys/kernel/debug/cxgb4vf support code and data.
 * ================================================
 */

/*
 * Show SGE Queue Set information. We display QPL Queue Sets per line.
 */
#define QPL 4

static int sge_qinfo_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

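	/*
	 * The helpers below each emit one labelled row with a column per
	 * Queue Set: S3 is the general form, S prints plain values, and T/R
	 * index into the current TX/RX queue arrays respectively.
	 */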
	#define S3(fmt_spec, s, v) \
		do {\
			seq_printf(seq, "%-12s", s); \
			for (qs = 0; qs < n; ++qs) \
				seq_printf(seq, " %16" fmt_spec, v); \
			seq_putc(seq, '\n'); \
		} while (0)
	#define S(s, v) S3("s", s, v)
	#define T(s, v) S3("u", s, txq[qs].v)
	#define R(s, v) S3("u", s, rxq[qs].v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		S3("d", "Port:",
		   (rxq[qs].rspq.netdev
		    ? ((struct port_info *)
		       netdev_priv(rxq[qs].rspq.netdev))->port_id
		    : -1));
		T("TxQ ID:", q.abs_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ PIdx:", q.pidx);
		T("TxQ CIdx:", q.cidx);
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
		S3("u", "Intr pktcnt:",
		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
		R("RspQ CIdx:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		R("FL ID:", fl.abs_id);
		R("FL size:", fl.size - MIN_FL_RESID);
		R("FL avail:", fl.avail);
		R("FL PIdx:", fl.pidx);
		R("FL CIdx:", fl.cidx);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, evtq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[evtq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, intrq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[intrq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
	}

	#undef R
	#undef T
	#undef S
	#undef S3

	return 0;
}

/*
 * Return the number of "entries" in our "file". We group the multi-Queue
 * sections with QPL Queue Sets per "entry". The sections of the output are:
 *
 *     Ethernet RX/TX Queue Sets
 *     Firmware Event Queue
 *     Forwarded Interrupt Queue (if in MSI mode)
 */
static int sge_queue_entries(const struct adapter *adapter)
{
	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
		((adapter->flags & USING_MSI) != 0);
}

static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
{
	int entries = sge_queue_entries(seq->private);

	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static void sge_queue_stop(struct seq_file *seq, void *v)
{
}

static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int entries = sge_queue_entries(seq->private);

	++*pos;
	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static const struct seq_operations sge_qinfo_seq_ops = {
	.start = sge_queue_start,
	.next = sge_queue_next,
	.stop = sge_queue_stop,
	.show = sge_qinfo_show
};

static int sge_qinfo_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &sge_qinfo_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations sge_qinfo_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = sge_qinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/*
 * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
 */
#define QPL 4

static int sge_qstats_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

	#define S3(fmt, s, v) \
		do { \
			seq_printf(seq, "%-16s", s); \
			for (qs = 0; qs < n; ++qs) \
				seq_printf(seq, " %8" fmt, v); \
			seq_putc(seq, '\n'); \
		} while (0)
	#define S(s, v) S3("s", s, v)

	#define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
	#define T(s, v) T3("lu", s, v)

	#define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
	#define R(s, v) R3("lu", s, v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
		R("RxPackets:", stats.pkts);
		R("RxCSO:", stats.rx_cso);
		R("VLANxtract:", stats.vlan_ex);
		R("LROmerged:", stats.lro_merged);
		R("LROpackets:", stats.lro_pkts);
		R("RxDrops:", stats.rx_drops);
		T("TSO:", tso);
		T("TxCSO:", tx_cso);
		T("VLANins:", vlan_ins);
		T("TxQFull:", q.stops);
		T("TxQRestarts:", q.restarts);
		T("TxMapErr:", mapping_err);
		R("FLAllocErr:", fl.alloc_failed);
		R("FLLrgAlcErr:", fl.large_alloc_failed);
		R("FLStarving:", fl.starving);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   evtq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   intrq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
	}

	#undef R
	#undef T
	#undef S
	#undef R3
	#undef T3
	#undef S3

	return 0;
}

/*
 * Return the number of "entries" in our "file". We group the multi-Queue
 * sections with QPL Queue Sets per "entry". The sections of the output are:
 *
 *     Ethernet RX/TX Queue Sets
 *     Firmware Event Queue
 *     Forwarded Interrupt Queue (if in MSI mode)
 */
static int sge_qstats_entries(const struct adapter *adapter)
{
	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
		((adapter->flags & USING_MSI) != 0);
}

static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
{
	int entries = sge_qstats_entries(seq->private);

	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static void sge_qstats_stop(struct seq_file *seq, void *v)
{
}

static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int entries = sge_qstats_entries(seq->private);

	(*pos)++;
	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static const struct seq_operations sge_qstats_seq_ops = {
	.start = sge_qstats_start,
	.next = sge_qstats_next,
	.stop = sge_qstats_stop,
	.show = sge_qstats_show
};

static int sge_qstats_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &sge_qstats_seq_ops);

	if (res == 0) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations sge_qstats_proc_fops = {
	.owner = THIS_MODULE,
	.open = sge_qstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/*
 * Show PCI-E SR-IOV Virtual Function Resource Limits.
 */
static int resources_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct vf_resources *vfres = &adapter->params.vfres;

	#define S(desc, fmt, var) \
		seq_printf(seq, "%-60s " fmt "\n", \
			   desc " (" #var "):", vfres->var)

	S("Virtual Interfaces", "%d", nvi);
	S("Egress Queues", "%d", neq);
	S("Ethernet Control", "%d", nethctrl);
	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
	S("Ingress Queues", "%d", niq);
	S("Traffic Class", "%d", tc);
	S("Port Access Rights Mask", "%#x", pmask);
	S("MAC Address Filters", "%d", nexactf);
	S("Firmware Command Read Capabilities", "%#x", r_caps);
	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);

	#undef S

	return 0;
}

static int resources_open(struct inode *inode, struct file *file)
{
	return single_open(file, resources_show, inode->i_private);
}

static const struct file_operations resources_proc_fops = {
	.owner = THIS_MODULE,
	.open = resources_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Show Virtual Interfaces.
 */
static int interfaces_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Interface Port VIID\n");
	} else {
		struct adapter *adapter = seq->private;
		int pidx = (uintptr_t)v - 2;
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);

		seq_printf(seq, "%9s %4d %#5x\n",
			   dev->name, pi->port_id, pi->viid);
	}
	return 0;
}
1951
1952static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
1953{
1954 return pos <= adapter->params.nports
1955 ? (void *)(uintptr_t)(pos + 1)
1956 : NULL;
1957}
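/*
 * Note on the index encoding above: seq_file treats a NULL iterator as
 * end-of-file and reserves position 0 for SEQ_START_TOKEN, so the position
 * is biased by +1 before being cast to an opaque pointer;
 * interfaces_show() undoes the bias with "(uintptr_t)v - 2" to recover a
 * zero-based port index.
 */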
1958
1959static void *interfaces_start(struct seq_file *seq, loff_t *pos)
1960{
1961 return *pos
1962 ? interfaces_get_idx(seq->private, *pos)
1963 : SEQ_START_TOKEN;
1964}
1965
1966static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
1967{
1968 (*pos)++;
1969 return interfaces_get_idx(seq->private, *pos);
1970}
1971
1972static void interfaces_stop(struct seq_file *seq, void *v)
1973{
1974}
1975
1976static const struct seq_operations interfaces_seq_ops = {
1977 .start = interfaces_start,
1978 .next = interfaces_next,
1979 .stop = interfaces_stop,
1980 .show = interfaces_show
1981};
1982
1983static int interfaces_open(struct inode *inode, struct file *file)
1984{
1985 int res = seq_open(file, &interfaces_seq_ops);
1986
1987 if (res == 0) {
1988 struct seq_file *seq = file->private_data;
1989 seq->private = inode->i_private;
1990 }
1991 return res;
1992}
1993
1994static const struct file_operations interfaces_proc_fops = {
1995 .owner = THIS_MODULE,
1996 .open = interfaces_open,
1997 .read = seq_read,
1998 .llseek = seq_lseek,
1999 .release = seq_release,
2000};
2001
2002/*
2003 * /sys/kernel/debug/cxgb4vf/ files list.
2004 */
2005struct cxgb4vf_debugfs_entry {
2006 const char *name; /* name of debugfs node */
2007 umode_t mode; /* file system mode */
2008 const struct file_operations *fops;
2009};
2010
2011static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2012 { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
2013 { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2014 { "resources", S_IRUGO, &resources_proc_fops },
2015 { "interfaces", S_IRUGO, &interfaces_proc_fops },
2016};
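/*
 * Usage sketch (the PCI name below is illustrative, not taken from this
 * file): after cxgb4vf_pci_probe() creates the per-device directory,
 * these nodes can be read directly from userspace, e.g.
 *
 *	cat /sys/kernel/debug/cxgb4vf/0000:04:00.1/resources
 */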
2017
2018/*
2019 * Module and device initialization and cleanup code.
2020 * ==================================================
2021 */
2022
2023/*
2024 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2025 * directory (debugfs_root) has already been set up.
2026 */
2027static int setup_debugfs(struct adapter *adapter)
2028{
2029 int i;
2030
2031 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2032
2033 /*
2034 * Debugfs support is best effort.
2035 */
2036 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2037 (void)debugfs_create_file(debugfs_files[i].name,
2038 debugfs_files[i].mode,
2039 adapter->debugfs_root,
2040 (void *)adapter,
2041 debugfs_files[i].fops);
2042
2043 return 0;
2044}
2045
2046/*
2047 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2048 * it to our caller to tear down the directory (debugfs_root).
2049 */
2050static void cleanup_debugfs(struct adapter *adapter)
2051{
2052 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2053
2054 /*
2055 * Unlike our sister routine cleanup_proc(), we don't need to remove
2056 * individual entries because a call will be made to
2057 * debugfs_remove_recursive(). We just need to clean up any ancillary
2058 * persistent state.
2059 */
2060 /* nothing to do */
2061}
2062
2063/*
2064 * Perform early "adapter" initialization. This is where we discover what
2065 * adapter parameters we're going to be using and initialize basic adapter
2066 * hardware support.
2067 */
2068static int adap_init0(struct adapter *adapter)
2069{
2070 struct vf_resources *vfres = &adapter->params.vfres;
2071 struct sge_params *sge_params = &adapter->params.sge;
2072 struct sge *s = &adapter->sge;
2073 unsigned int ethqsets;
2074 int err;
2075
2076 /*
2077 * Wait for the device to become ready before proceeding ...
2078 */
2079 err = t4vf_wait_dev_ready(adapter);
2080 if (err) {
2081 dev_err(adapter->pdev_dev, "device didn't become ready:"
2082 " err=%d\n", err);
2083 return err;
2084 }
2085
2086 /*
2087 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2088 * 2.6.31 and later we can't call pci_reset_function() in order to
2089 * issue an FLR because of a self-deadlock on the device semaphore.
2090 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2091 * cases where they're needed -- for instance, some versions of KVM
2092 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2093 * use the firmware based reset in order to reset any per function
2094 * state.
2095 */
2096 err = t4vf_fw_reset(adapter);
2097 if (err < 0) {
2098 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2099 return err;
2100 }
2101
2102 switch (adapter->pdev->device >> 12) {
2103 case CHELSIO_T4:
2104 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
2105 break;
2106 case CHELSIO_T5:
2107 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
2108 break;
2109 }
2110
2111 /*
2112 * Grab basic operational parameters. These will predominantly have
2113 * been set up by the Physical Function Driver or will be hard coded
2114 * into the adapter. We just have to live with them ... Note that
2115 * we _must_ get our VPD parameters before our SGE parameters because
2116 * we need to know the adapter's core clock from the VPD in order to
2117 * properly decode the SGE Timer Values.
2118 */
2119 err = t4vf_get_dev_params(adapter);
2120 if (err) {
2121 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2122 " device parameters: err=%d\n", err);
2123 return err;
2124 }
2125 err = t4vf_get_vpd_params(adapter);
2126 if (err) {
2127 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2128 " VPD parameters: err=%d\n", err);
2129 return err;
2130 }
2131 err = t4vf_get_sge_params(adapter);
2132 if (err) {
2133 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2134 " SGE parameters: err=%d\n", err);
2135 return err;
2136 }
2137 err = t4vf_get_rss_glb_config(adapter);
2138 if (err) {
2139 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2140 " RSS parameters: err=%d\n", err);
2141 return err;
2142 }
2143 if (adapter->params.rss.mode !=
2144 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2145 dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2146 " mode %d\n", adapter->params.rss.mode);
2147 return -EINVAL;
2148 }
2149 err = t4vf_sge_init(adapter);
2150 if (err) {
2151 dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2152 " err=%d\n", err);
2153 return err;
2154 }
2155
2156 /*
2157 * Retrieve our RX interrupt holdoff timer values and counter
2158 * threshold values from the SGE parameters.
2159 */
2160 s->timer_val[0] = core_ticks_to_us(adapter,
2161 TIMERVALUE0_GET(sge_params->sge_timer_value_0_and_1));
2162 s->timer_val[1] = core_ticks_to_us(adapter,
2163 TIMERVALUE1_GET(sge_params->sge_timer_value_0_and_1));
2164 s->timer_val[2] = core_ticks_to_us(adapter,
2165 TIMERVALUE0_GET(sge_params->sge_timer_value_2_and_3));
2166 s->timer_val[3] = core_ticks_to_us(adapter,
2167 TIMERVALUE1_GET(sge_params->sge_timer_value_2_and_3));
2168 s->timer_val[4] = core_ticks_to_us(adapter,
2169 TIMERVALUE0_GET(sge_params->sge_timer_value_4_and_5));
2170 s->timer_val[5] = core_ticks_to_us(adapter,
2171 TIMERVALUE1_GET(sge_params->sge_timer_value_4_and_5));
2172
2173 s->counter_val[0] =
2174 THRESHOLD_0_GET(sge_params->sge_ingress_rx_threshold);
2175 s->counter_val[1] =
2176 THRESHOLD_1_GET(sge_params->sge_ingress_rx_threshold);
2177 s->counter_val[2] =
2178 THRESHOLD_2_GET(sge_params->sge_ingress_rx_threshold);
2179 s->counter_val[3] =
2180 THRESHOLD_3_GET(sge_params->sge_ingress_rx_threshold);
2181
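/*
 * Illustrative arithmetic, assuming a 250000 kHz VPD core clock (not a
 * value taken from this file): core_ticks_to_us() scales Core Clock ticks
 * to microseconds, so a raw SGE timer value of 1250 ticks would decode to
 * roughly 1250 * 1000 / 250000 = 5 us of interrupt holdoff.
 */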
2182 /*
2183 * Grab our Virtual Interface resource allocation, extract the
2184 * features that we're interested in and do a bit of sanity testing on
2185 * what we discover.
2186 */
2187 err = t4vf_get_vfres(adapter);
2188 if (err) {
2189 dev_err(adapter->pdev_dev, "unable to get virtual interface"
2190 " resources: err=%d\n", err);
2191 return err;
2192 }
2193
2194 /*
2195 * The number of "ports" which we support is equal to the number of
2196 * Virtual Interfaces with which we've been provisioned.
2197 */
2198 adapter->params.nports = vfres->nvi;
2199 if (adapter->params.nports > MAX_NPORTS) {
2200 dev_warn(adapter->pdev_dev, "only using %d of %d allowed"
2201 " virtual interfaces\n", MAX_NPORTS,
2202 adapter->params.nports);
2203 adapter->params.nports = MAX_NPORTS;
2204 }
2205
2206 /*
2207 * We need to reserve a number of the ingress queues with Free List
2208 * and Interrupt capabilities for special interrupt purposes (like
2209 * asynchronous firmware messages, or forwarded interrupts if we're
2210 * using MSI). The rest of the FL/Intr-capable ingress queues will be
2211 * matched up one-for-one with Ethernet/Control egress queues in order
2212 * to form "Queue Sets" which will be apportioned between the "ports".
2213 * For each Queue Set, we'll need the ability to allocate two Egress
2214 * Contexts -- one for the Ingress Queue Free List and one for the TX
2215 * Ethernet Queue.
2216 */
2217 ethqsets = vfres->niqflint - INGQ_EXTRAS;
2218 if (vfres->nethctrl != ethqsets) {
2219 dev_warn(adapter->pdev_dev, "unequal number of [available]"
2220 " ingress/egress queues (%d/%d); using minimum for"
2221 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
2222 ethqsets = min(vfres->nethctrl, ethqsets);
2223 }
2224 if (vfres->neq < ethqsets*2) {
2225 dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
2226 " to support Queue Sets (%d); reducing allowed Queue"
2227 " Sets\n", vfres->neq, ethqsets);
2228 ethqsets = vfres->neq/2;
2229 }
2230 if (ethqsets > MAX_ETH_QSETS) {
2231 dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
2232 " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
2233 ethqsets = MAX_ETH_QSETS;
2234 }
2235 if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
2236 dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
2237 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
2238 }
2239 adapter->sge.max_ethqsets = ethqsets;
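/*
 * A worked example with invented provisioning: niqflint = 18 with
 * INGQ_EXTRAS = 2 leaves ethqsets = 16; a nethctrl of 12 would trim that
 * to 12 via the first check, and an neq of 20 would cap it at 20/2 = 10
 * via the Egress Context check (two contexts per Queue Set).
 */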
2240
2241 /*
2242 * Check for various parameter sanity issues. Most checks simply
2243 * result in us using fewer resources than our provisioning but we
2244 * do need at least one "port" with which to work ...
2245 */
2246 if (adapter->sge.max_ethqsets < adapter->params.nports) {
2247 dev_warn(adapter->pdev_dev, "only using %d of %d available"
2248 " virtual interfaces (too few Queue Sets)\n",
2249 adapter->sge.max_ethqsets, adapter->params.nports);
2250 adapter->params.nports = adapter->sge.max_ethqsets;
2251 }
2252 if (adapter->params.nports == 0) {
2253 dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2254 "usable!\n");
2255 return -EINVAL;
2256 }
2257 return 0;
2258}
2259
2260static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2261 u8 pkt_cnt_idx, unsigned int size,
2262 unsigned int iqe_size)
2263{
2264 rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
2265 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
2266 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2267 ? pkt_cnt_idx
2268 : 0);
2269 rspq->iqe_len = iqe_size;
2270 rspq->size = size;
2271}
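/*
 * For example, the per-Queue-Set default used in cfg_queues() below,
 * init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size), selects holdoff timer
 * index 0, enables the packet-count threshold with counter index 0
 * (since 0 < SGE_NCOUNTERS), and sizes the Response Queue at 1024
 * entries of iqe_size (64) bytes each.
 */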
2272
2273/*
2274 * Perform default configuration of DMA queues depending on the number and
2275 * type of ports we found and the number of available CPUs. Most settings can
2276 * be modified by the admin via ethtool and cxgbtool prior to the adapter
2277 * being brought up for the first time.
2278 */
2279static void cfg_queues(struct adapter *adapter)
2280{
2281 struct sge *s = &adapter->sge;
2282 int q10g, n10g, qidx, pidx, qs;
2283 size_t iqe_size;
2284
2285 /*
2286 * We should not be called till we know how many Queue Sets we can
2287 * support. In particular, this means that we need to know what kind
2288 * of interrupts we'll be using ...
2289 */
2290 BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2291
2292 /*
2293 * Count the number of 10GbE Virtual Interfaces that we have.
2294 */
2295 n10g = 0;
2296 for_each_port(adapter, pidx)
2297 n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2298
2299 /*
2300 * We default to 1 queue per non-10G port and up to # of cores queues
2301 * per 10G port.
2302 */
2303 if (n10g == 0)
2304 q10g = 0;
2305 else {
2306 int n1g = (adapter->params.nports - n10g);
2307 q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2308 if (q10g > num_online_cpus())
2309 q10g = num_online_cpus();
2310 }
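/*
 * Sketching the division with assumed numbers: 16 available Queue Sets
 * across one 10G and one 1G port gives n10g = 1, n1g = 1 and
 * q10g = (16 - 1) / 1 = 15, which an 8-CPU host then clamps to 8; the
 * loop below would assign 8 Queue Sets to the 10G port and 1 to the
 * 1G port.
 */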
2311
2312 /*
2313 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2314 * The layout will be established in setup_sge_queues() when the
2315 * adapter is brought up for the first time.
2316 */
2317 qidx = 0;
2318 for_each_port(adapter, pidx) {
2319 struct port_info *pi = adap2pinfo(adapter, pidx);
2320
2321 pi->first_qset = qidx;
2322 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
2323 qidx += pi->nqsets;
2324 }
2325 s->ethqsets = qidx;
2326
2327 /*
2328 * The Ingress Queue Entry Size for our various Response Queues needs
2329 * to be big enough to accommodate the largest message we can receive
2330 * from the chip/firmware; which is 64 bytes ...
2331 */
2332 iqe_size = 64;
2333
2334 /*
2335 * Set up default Queue Set parameters ... Start off with the
2336 * shortest interrupt holdoff timer.
2337 */
2338 for (qs = 0; qs < s->max_ethqsets; qs++) {
2339 struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2340 struct sge_eth_txq *txq = &s->ethtxq[qs];
2341
2342 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2343 rxq->fl.size = 72;
2344 txq->q.size = 1024;
2345 }
2346
2347 /*
2348 * The firmware event queue is used for link state changes and
2349 * notifications of TX DMA completions.
2350 */
2351 init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2352
2353 /*
2354 * The forwarded interrupt queue is used when we're in MSI interrupt
2355 * mode. In this mode all interrupts associated with RX queues will
2356 * be forwarded to a single queue which we'll associate with our MSI
2357 * interrupt vector. The messages dropped in the forwarded interrupt
2358 * queue will indicate which ingress queue needs servicing ... This
2359 * queue needs to be large enough to accommodate all of the ingress
2360 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2361 * from equalling the CIDX if every ingress queue has an outstanding
2362 * interrupt). The queue doesn't need to be any larger because no
2363 * ingress queue will ever have more than one outstanding interrupt at
2364 * any time ...
2365 */
2366 init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2367 iqe_size);
2368}
2369
2370/*
2371 * Reduce the number of Ethernet queues across all ports to at most n.
2372 * n provides at least one queue per port.
2373 */
2374static void reduce_ethqs(struct adapter *adapter, int n)
2375{
2376 int i;
2377 struct port_info *pi;
2378
2379 /*
2380 * While we have too many active Ethernet Queue Sets, iterate across the
2381 * "ports" and reduce their individual Queue Set allocations.
2382 */
2383 BUG_ON(n < adapter->params.nports);
2384 while (n < adapter->sge.ethqsets)
2385 for_each_port(adapter, i) {
2386 pi = adap2pinfo(adapter, i);
2387 if (pi->nqsets > 1) {
2388 pi->nqsets--;
2389 adapter->sge.ethqsets--;
2390 if (adapter->sge.ethqsets <= n)
2391 break;
2392 }
2393 }
2394
2395 /*
2396 * Reassign the starting Queue Sets for each of the "ports" ...
2397 */
2398 n = 0;
2399 for_each_port(adapter, i) {
2400 pi = adap2pinfo(adapter, i);
2401 pi->first_qset = n;
2402 n += pi->nqsets;
2403 }
2404}
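/*
 * A worked reduction with invented counts: three ports holding 4/4/1
 * Queue Sets (9 total) trimmed to n = 6 become 3/3/1 after the first
 * pass and 2/3/1 after the second, at which point first_qset is
 * reassigned to 0, 2 and 5 respectively.
 */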
2405
2406/*
2407 * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
2408 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2409 * need. Minimally we need one for every Virtual Interface plus those needed
2410 * for our "extras". Note that this process may lower the maximum number of
2411 * allowed Queue Sets ...
2412 */
2413static int enable_msix(struct adapter *adapter)
2414{
2415 int i, err, want, need;
2416 struct msix_entry entries[MSIX_ENTRIES];
2417 struct sge *s = &adapter->sge;
2418
2419 for (i = 0; i < MSIX_ENTRIES; ++i)
2420 entries[i].entry = i;
2421
2422 /*
2423 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2424 * plus those needed for our "extras" (for example, the firmware
2425 * message queue). We _need_ at least one "Queue Set" per Virtual
2426 * Interface plus those needed for our "extras". So now we get to see
2427 * if the song is right ...
2428 */
2429 want = s->max_ethqsets + MSIX_EXTRAS;
2430 need = adapter->params.nports + MSIX_EXTRAS;
2431 while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
2432 want = err;
2433
2434 if (err == 0) {
2435 int nqsets = want - MSIX_EXTRAS;
2436 if (nqsets < s->max_ethqsets) {
2437 dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2438 " for %d Queue Sets\n", nqsets);
2439 s->max_ethqsets = nqsets;
2440 if (nqsets < s->ethqsets)
2441 reduce_ethqs(adapter, nqsets);
2442 }
2443 for (i = 0; i < want; ++i)
2444 adapter->msix_info[i].vec = entries[i].vector;
2445 } else if (err > 0) {
2446 pci_disable_msix(adapter->pdev);
2447 dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
2448 " not using MSI-X\n", err);
2449 }
2450 return err;
2451}
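/*
 * Illustrative negotiation, with assumed values: if max_ethqsets = 16 and
 * MSIX_EXTRAS = 1 on a two-port adapter, want = 17 and need = 3. Should
 * the platform offer only 9 vectors, pci_enable_msix() returns 9, the
 * loop retries with want = 9, and on success the driver falls back to
 * 8 Queue Sets, invoking reduce_ethqs() if that undercuts s->ethqsets.
 */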
2452
2453static const struct net_device_ops cxgb4vf_netdev_ops = {
2454 .ndo_open = cxgb4vf_open,
2455 .ndo_stop = cxgb4vf_stop,
2456 .ndo_start_xmit = t4vf_eth_xmit,
2457 .ndo_get_stats = cxgb4vf_get_stats,
2458 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2459 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2460 .ndo_validate_addr = eth_validate_addr,
2461 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2462 .ndo_change_mtu = cxgb4vf_change_mtu,
2463 .ndo_fix_features = cxgb4vf_fix_features,
2464 .ndo_set_features = cxgb4vf_set_features,
2465#ifdef CONFIG_NET_POLL_CONTROLLER
2466 .ndo_poll_controller = cxgb4vf_poll_controller,
2467#endif
2468};
2469
2470/*
2471 * "Probe" a device: initialize a device and construct all kernel and driver
2472 * state needed to manage the device. This routine is called "init_one" in
2473 * the PF Driver ...
2474 */
2475static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2476 const struct pci_device_id *ent)
2477{
2478 int pci_using_dac;
2479 int err, pidx;
2480 unsigned int pmask;
2481 struct adapter *adapter;
2482 struct port_info *pi;
2483 struct net_device *netdev;
2484
2485 /*
2486 * Print our driver banner the first time we're called to initialize a
2487 * device.
2488 */
2489 pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2490
2491 /*
2492 * Initialize generic PCI device state.
2493 */
2494 err = pci_enable_device(pdev);
2495 if (err) {
2496 dev_err(&pdev->dev, "cannot enable PCI device\n");
2497 return err;
2498 }
2499
2500 /*
2501 * Reserve PCI resources for the device. If we can't get them some
2502 * other driver may have already claimed the device ...
2503 */
2504 err = pci_request_regions(pdev, KBUILD_MODNAME);
2505 if (err) {
2506 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2507 goto err_disable_device;
2508 }
2509
2510 /*
2511 * Set up our DMA mask: try for 64-bit address masking first and
2512 * fall back to 32-bit if we can't get 64 bits ...
2513 */
2514 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2515 if (err == 0) {
2516 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2517 if (err) {
2518 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2519 " coherent allocations\n");
2520 goto err_release_regions;
2521 }
2522 pci_using_dac = 1;
2523 } else {
2524 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2525 if (err != 0) {
2526 dev_err(&pdev->dev, "no usable DMA configuration\n");
2527 goto err_release_regions;
2528 }
2529 pci_using_dac = 0;
2530 }
2531
2532 /*
2533 * Enable bus mastering for the device ...
2534 */
2535 pci_set_master(pdev);
2536
2537 /*
2538 * Allocate our adapter data structure and attach it to the device.
2539 */
2540 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2541 if (!adapter) {
2542 err = -ENOMEM;
2543 goto err_release_regions;
2544 }
2545 pci_set_drvdata(pdev, adapter);
2546 adapter->pdev = pdev;
2547 adapter->pdev_dev = &pdev->dev;
2548
2549 /*
2550 * Initialize SMP data synchronization resources.
2551 */
2552 spin_lock_init(&adapter->stats_lock);
2553
2554 /*
2555 * Map our I/O registers in BAR0.
2556 */
2557 adapter->regs = pci_ioremap_bar(pdev, 0);
2558 if (!adapter->regs) {
2559 dev_err(&pdev->dev, "cannot map device registers\n");
2560 err = -ENOMEM;
2561 goto err_free_adapter;
2562 }
2563
2564 /*
2565 * Initialize adapter level features.
2566 */
2567 adapter->name = pci_name(pdev);
2568 adapter->msg_enable = dflt_msg_enable;
2569 err = adap_init0(adapter);
2570 if (err)
2571 goto err_unmap_bar;
2572
2573 /*
2574 * Allocate our "adapter ports" and stitch everything together.
2575 */
2576 pmask = adapter->params.vfres.pmask;
2577 for_each_port(adapter, pidx) {
2578 int port_id, viid;
2579
2580 /*
2581 * We simplistically allocate our virtual interfaces
2582 * sequentially across the port numbers to which we have
2583 * access rights. This should be configurable in some manner
2584 * ...
2585 */
2586 if (pmask == 0)
2587 break;
2588 port_id = ffs(pmask) - 1;
2589 pmask &= ~(1 << port_id);
2590 viid = t4vf_alloc_vi(adapter, port_id);
2591 if (viid < 0) {
2592 dev_err(&pdev->dev, "cannot allocate VI for port %d:"
2593 " err=%d\n", port_id, viid);
2594 err = viid;
2595 goto err_free_dev;
2596 }
2597
2598 /*
2599 * Allocate our network device and stitch things together.
2600 */
2601 netdev = alloc_etherdev_mq(sizeof(struct port_info),
2602 MAX_PORT_QSETS);
2603 if (netdev == NULL) {
2604 t4vf_free_vi(adapter, viid);
2605 err = -ENOMEM;
2606 goto err_free_dev;
2607 }
2608 adapter->port[pidx] = netdev;
2609 SET_NETDEV_DEV(netdev, &pdev->dev);
2610 pi = netdev_priv(netdev);
2611 pi->adapter = adapter;
2612 pi->pidx = pidx;
2613 pi->port_id = port_id;
2614 pi->viid = viid;
2615
2616 /*
2617 * Initialize the starting state of our "port" and register
2618 * it.
2619 */
2620 pi->xact_addr_filt = -1;
2621 netif_carrier_off(netdev);
2622 netdev->irq = pdev->irq;
2623
2624 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
2625 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2626 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2627 netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
2628 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2629 NETIF_F_HIGHDMA;
2630 netdev->features = netdev->hw_features |
2631 NETIF_F_HW_VLAN_CTAG_TX;
2632 if (pci_using_dac)
2633 netdev->features |= NETIF_F_HIGHDMA;
2634
2635 netdev->priv_flags |= IFF_UNICAST_FLT;
2636
2637 netdev->netdev_ops = &cxgb4vf_netdev_ops;
2638 SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
2639
2640 /*
2641 * Initialize the hardware/software state for the port.
2642 */
2643 err = t4vf_port_init(adapter, pidx);
2644 if (err) {
2645 dev_err(&pdev->dev, "cannot initialize port %d\n",
2646 pidx);
2647 goto err_free_dev;
2648 }
2649 }
2650
2651 /*
2652 * The "card" is now ready to go. If any errors occur during device
2653 * registration we do not fail the whole "card" but rather proceed
2654 * only with the ports we manage to register successfully. However we
2655 * must register at least one net device.
2656 */
2657 for_each_port(adapter, pidx) {
2658 netdev = adapter->port[pidx];
2659 if (netdev == NULL)
2660 continue;
2661
2662 err = register_netdev(netdev);
2663 if (err) {
2664 dev_warn(&pdev->dev, "cannot register net device %s,"
2665 " skipping\n", netdev->name);
2666 continue;
2667 }
2668
2669 set_bit(pidx, &adapter->registered_device_map);
2670 }
2671 if (adapter->registered_device_map == 0) {
2672 dev_err(&pdev->dev, "could not register any net devices\n");
2673 goto err_free_dev;
2674 }
2675
2676 /*
2677 * Set up our debugfs entries.
2678 */
2679 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2680 adapter->debugfs_root =
2681 debugfs_create_dir(pci_name(pdev),
2682 cxgb4vf_debugfs_root);
2683 if (IS_ERR_OR_NULL(adapter->debugfs_root))
2684 dev_warn(&pdev->dev, "could not create debugfs"
2685 " directory");
2686 else
2687 setup_debugfs(adapter);
2688 }
2689
2690 /*
2691 * See what interrupts we'll be using. If we've been configured to
2692 * use MSI-X interrupts, try to enable them but fall back to using
2693 * MSI interrupts if we can't enable MSI-X interrupts. If we can't
2694 * get MSI interrupts we bail with the error.
2695 */
2696 if (msi == MSI_MSIX && enable_msix(adapter) == 0)
2697 adapter->flags |= USING_MSIX;
2698 else {
2699 err = pci_enable_msi(pdev);
2700 if (err) {
2701 dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
2702 " err=%d\n",
2703 msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
2704 goto err_free_debugfs;
2705 }
2706 adapter->flags |= USING_MSI;
2707 }
2708
2709 /*
2710 * Now that we know how many "ports" we have and what their types are,
2711 * and how many Queue Sets we can support, we can configure our queue
2712 * resources.
2713 */
2714 cfg_queues(adapter);
2715
2716 /*
2717 * Print a short notice on the existence and configuration of the new
2718 * VF network device ...
2719 */
2720 for_each_port(adapter, pidx) {
2721 dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
2722 adapter->port[pidx]->name,
2723 (adapter->flags & USING_MSIX) ? "MSI-X" :
2724 (adapter->flags & USING_MSI) ? "MSI" : "");
2725 }
2726
2727 /*
2728 * Return success!
2729 */
2730 return 0;
2731
2732 /*
2733 * Error recovery and exit code. Unwind state that's been created
2734 * so far and return the error.
2735 */
2736
2737err_free_debugfs:
2738 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2739 cleanup_debugfs(adapter);
2740 debugfs_remove_recursive(adapter->debugfs_root);
2741 }
2742
2743err_free_dev:
2744 for_each_port(adapter, pidx) {
2745 netdev = adapter->port[pidx];
2746 if (netdev == NULL)
2747 continue;
2748 pi = netdev_priv(netdev);
2749 t4vf_free_vi(adapter, pi->viid);
2750 if (test_bit(pidx, &adapter->registered_device_map))
2751 unregister_netdev(netdev);
2752 free_netdev(netdev);
2753 }
2754
2755err_unmap_bar:
2756 iounmap(adapter->regs);
2757
2758err_free_adapter:
2759 kfree(adapter);
2760 pci_set_drvdata(pdev, NULL);
2761
2762err_release_regions:
2763 pci_release_regions(pdev);
2764 pci_set_drvdata(pdev, NULL);
2765 pci_clear_master(pdev);
2766
2767err_disable_device:
2768 pci_disable_device(pdev);
2769
2770 return err;
2771}
2772
2773/*
2774 * "Remove" a device: tear down all kernel and driver state created in the
2775 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
2776 * that this is called "remove_one" in the PF Driver.)
2777 */
2778static void cxgb4vf_pci_remove(struct pci_dev *pdev)
2779{
2780 struct adapter *adapter = pci_get_drvdata(pdev);
2781
2782 /*
2783 * Tear down driver state associated with device.
2784 */
2785 if (adapter) {
2786 int pidx;
2787
2788 /*
2789 * Stop all of our activity. Unregister network port,
2790 * disable interrupts, etc.
2791 */
2792 for_each_port(adapter, pidx)
2793 if (test_bit(pidx, &adapter->registered_device_map))
2794 unregister_netdev(adapter->port[pidx]);
2795 t4vf_sge_stop(adapter);
2796 if (adapter->flags & USING_MSIX) {
2797 pci_disable_msix(adapter->pdev);
2798 adapter->flags &= ~USING_MSIX;
2799 } else if (adapter->flags & USING_MSI) {
2800 pci_disable_msi(adapter->pdev);
2801 adapter->flags &= ~USING_MSI;
2802 }
2803
2804 /*
2805 * Tear down our debugfs entries.
2806 */
2807 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2808 cleanup_debugfs(adapter);
2809 debugfs_remove_recursive(adapter->debugfs_root);
2810 }
2811
2812 /*
2813 * Free all of the various resources which we've acquired ...
2814 */
2815 t4vf_free_sge_resources(adapter);
2816 for_each_port(adapter, pidx) {
2817 struct net_device *netdev = adapter->port[pidx];
2818 struct port_info *pi;
2819
2820 if (netdev == NULL)
2821 continue;
2822
2823 pi = netdev_priv(netdev);
2824 t4vf_free_vi(adapter, pi->viid);
2825 free_netdev(netdev);
2826 }
2827 iounmap(adapter->regs);
2828 kfree(adapter);
2829 pci_set_drvdata(pdev, NULL);
2830 }
2831
2832 /*
2833 * Disable the device and release its PCI resources.
2834 */
2835 pci_disable_device(pdev);
2836 pci_clear_master(pdev);
2837 pci_release_regions(pdev);
2838}
2839
2840/*
2841 * "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
2842 * delivery.
2843 */
2844static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
2845{
2846 struct adapter *adapter;
2847 int pidx;
2848
2849 adapter = pci_get_drvdata(pdev);
2850 if (!adapter)
2851 return;
2852
2853 /*
2854 * Disable all Virtual Interfaces. This will shut down the
2855 * delivery of all ingress packets into the chip for these
2856 * Virtual Interfaces.
2857 */
2858 for_each_port(adapter, pidx) {
2859 struct net_device *netdev;
2860 struct port_info *pi;
2861
2862 if (!test_bit(pidx, &adapter->registered_device_map))
2863 continue;
2864
2865 netdev = adapter->port[pidx];
2866 if (!netdev)
2867 continue;
2868
2869 pi = netdev_priv(netdev);
2870 t4vf_enable_vi(adapter, pi->viid, false, false);
2871 }
2872
2873 /*
2874 * Free up all Queues, which will prevent further DMA and
2875 * interrupts, allowing various internal pathways to drain.
2876 */
2877 t4vf_free_sge_resources(adapter);
2878}
2879
2880/*
2881 * PCI Device registration data structures.
2882 */
2883#define CH_DEVICE(devid, idx) \
2884 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
2885
2886static struct pci_device_id cxgb4vf_pci_tbl[] = {
2887 CH_DEVICE(0xb000, 0), /* PE10K FPGA */
2888 CH_DEVICE(0x4800, 0), /* T440-dbg */
2889 CH_DEVICE(0x4801, 0), /* T420-cr */
2890 CH_DEVICE(0x4802, 0), /* T422-cr */
2891 CH_DEVICE(0x4803, 0), /* T440-cr */
2892 CH_DEVICE(0x4804, 0), /* T420-bch */
2893 CH_DEVICE(0x4805, 0), /* T440-bch */
2894 CH_DEVICE(0x4806, 0), /* T460-ch */
2895 CH_DEVICE(0x4807, 0), /* T420-so */
2896 CH_DEVICE(0x4808, 0), /* T420-cx */
2897 CH_DEVICE(0x4809, 0), /* T420-bt */
2898 CH_DEVICE(0x480a, 0), /* T404-bt */
2899 CH_DEVICE(0x480d, 0), /* T480-cr */
2900 CH_DEVICE(0x480e, 0), /* T440-lp-cr */
2901 CH_DEVICE(0x5800, 0), /* T580-dbg */
2902 CH_DEVICE(0x5801, 0), /* T520-cr */
2903 CH_DEVICE(0x5802, 0), /* T522-cr */
2904 CH_DEVICE(0x5803, 0), /* T540-cr */
2905 CH_DEVICE(0x5804, 0), /* T520-bch */
2906 CH_DEVICE(0x5805, 0), /* T540-bch */
2907 CH_DEVICE(0x5806, 0), /* T540-ch */
2908 CH_DEVICE(0x5807, 0), /* T520-so */
2909 CH_DEVICE(0x5808, 0), /* T520-cx */
2910 CH_DEVICE(0x5809, 0), /* T520-bt */
2911 CH_DEVICE(0x580a, 0), /* T504-bt */
2912 CH_DEVICE(0x580b, 0), /* T520-sr */
2913 CH_DEVICE(0x580c, 0), /* T504-bt */
2914 CH_DEVICE(0x580d, 0), /* T580-cr */
2915 CH_DEVICE(0x580e, 0), /* T540-lp-cr */
2916 CH_DEVICE(0x580f, 0), /* Amsterdam */
2917 CH_DEVICE(0x5810, 0), /* T580-lp-cr */
2918 CH_DEVICE(0x5811, 0), /* T520-lp-cr */
2919 CH_DEVICE(0x5812, 0), /* T560-cr */
2920 CH_DEVICE(0x5813, 0), /* T580-cr */
2921 { 0, }
2922};
2923
2924MODULE_DESCRIPTION(DRV_DESC);
2925MODULE_AUTHOR("Chelsio Communications");
2926MODULE_LICENSE("Dual BSD/GPL");
2927MODULE_VERSION(DRV_VERSION);
2928MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
2929
2930static struct pci_driver cxgb4vf_driver = {
2931 .name = KBUILD_MODNAME,
2932 .id_table = cxgb4vf_pci_tbl,
2933 .probe = cxgb4vf_pci_probe,
2934 .remove = cxgb4vf_pci_remove,
2935 .shutdown = cxgb4vf_pci_shutdown,
2936};
2937
2938/*
2939 * Initialize global driver state.
2940 */
2941static int __init cxgb4vf_module_init(void)
2942{
2943 int ret;
2944
2945 /*
2946 * Vet our module parameters.
2947 */
2948 if (msi != MSI_MSIX && msi != MSI_MSI) {
2949 pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
2950 msi, MSI_MSIX, MSI_MSI);
2951 return -EINVAL;
2952 }
2953
2954 /* Debugfs support is optional, just warn if this fails */
2955 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
2956 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2957 pr_warn("could not create debugfs entry, continuing\n");
2958
2959 ret = pci_register_driver(&cxgb4vf_driver);
2960 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2961 debugfs_remove(cxgb4vf_debugfs_root);
2962 return ret;
2963}
2964
2965/*
2966 * Tear down global driver state.
2967 */
2968static void __exit cxgb4vf_module_exit(void)
2969{
2970 pci_unregister_driver(&cxgb4vf_driver);
2971 debugfs_remove(cxgb4vf_debugfs_root);
2972}
2973
2974module_init(cxgb4vf_module_init);
2975module_exit(cxgb4vf_module_exit);