1 /*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <asm/uaccess.h>
65
66 #include "cxgb4.h"
67 #include "t4_regs.h"
68 #include "t4_msg.h"
69 #include "t4fw_api.h"
70 #include "cxgb4_dcb.h"
71 #include "l2t.h"
72
73 #include <../drivers/net/bonding/bonding.h>
74
75 #ifdef DRV_VERSION
76 #undef DRV_VERSION
77 #endif
78 #define DRV_VERSION "2.0.0-ko"
79 #define DRV_DESC "Chelsio T4/T5 Network Driver"
80
81 /*
82 * Max interrupt hold-off timer value in us. Queues fall back to this value
83 * under extreme memory pressure so it's largish to give the system time to
84 * recover.
85 */
86 #define MAX_SGE_TIMERVAL 200U
87
88 enum {
89 /*
90 * Physical Function provisioning constants.
91 */
92 PFRES_NVI = 4, /* # of Virtual Interfaces */
93 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
94 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
95 */
96 PFRES_NEQ = 256, /* # of egress queues */
97 PFRES_NIQ = 0, /* # of ingress queues */
98 PFRES_TC = 0, /* PCI-E traffic class */
99 PFRES_NEXACTF = 128, /* # of exact MPS filters */
100
101 PFRES_R_CAPS = FW_CMD_CAP_PF,
102 PFRES_WX_CAPS = FW_CMD_CAP_PF,
103
104 #ifdef CONFIG_PCI_IOV
105 /*
106 * Virtual Function provisioning constants. We need two extra Ingress
107 * Queues with Interrupt capability to serve as the VF's Firmware
108 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109 * neither will have Free Lists associated with them. For each
110 * Ethernet/Control Egress Queue and for each Free List, we need an
111 * Egress Context.
112 */
113 VFRES_NPORTS = 1, /* # of "ports" per VF */
114 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
115
116 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
117 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
118 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
120 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
121 VFRES_TC = 0, /* PCI-E traffic class */
122 VFRES_NEXACTF = 16, /* # of exact MPS filters */
123
124 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
126 #endif
127 };
128
129 /*
130 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
131 * static and likely not to be useful in the long run. We really need to
132 * implement some form of persistent configuration which the firmware
133 * controls.
134 */
135 static unsigned int pfvfres_pmask(struct adapter *adapter,
136 unsigned int pf, unsigned int vf)
137 {
138 unsigned int portn, portvec;
139
140 /*
141 * Give PF's access to all of the ports.
142 */
143 if (vf == 0)
144 return FW_PFVF_CMD_PMASK_MASK;
145
146 /*
147 * For VFs, we'll assign them access to the ports based purely on the
148 * PF. We assign active ports in order, wrapping around if there are
149 * fewer active ports than PFs: e.g. active port[pf % nports].
150 * Unfortunately the adapter's port_info structs haven't been
151 * initialized yet so we have to compute this.
152 */
153 if (adapter->params.nports == 0)
154 return 0;
155
156 portn = pf % adapter->params.nports;
157 portvec = adapter->params.portvec;
158 for (;;) {
159 /*
160 * Isolate the lowest set bit in the port vector. If we're at
161 * the port number that we want, return that as the pmask;
162 * otherwise mask that bit out of the port vector and
163 * decrement our port number ...
164 */
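/* Worked example (illustrative numbers only): with portvec = 0xb
 * (ports 0, 1 and 3 active) and portn = 2, successive iterations
 * isolate 0x1, then 0x2, and finally return 0x8, the mask for port 3.
 */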
165 unsigned int pmask = portvec ^ (portvec & (portvec-1));
166 if (portn == 0)
167 return pmask;
168 portn--;
169 portvec &= ~pmask;
170 }
171 /*NOTREACHED*/
172 }
173
174 enum {
175 MAX_TXQ_ENTRIES = 16384,
176 MAX_CTRL_TXQ_ENTRIES = 1024,
177 MAX_RSPQ_ENTRIES = 16384,
178 MAX_RX_BUFFERS = 16384,
179 MIN_TXQ_ENTRIES = 32,
180 MIN_CTRL_TXQ_ENTRIES = 32,
181 MIN_RSPQ_ENTRIES = 128,
182 MIN_FL_ENTRIES = 16
183 };
184
185 /* Host shadow copy of ingress filter entry. This is in host native format
186 * and doesn't match the ordering or bit order, etc. of the hardware or the
187 * firmware command. The use of bit-field structure elements is purely to
188 * remind ourselves of the field size limitations and save memory in the case
189 * where the filter table is large.
190 */
191 struct filter_entry {
192 /* Administrative fields for filter.
193 */
194 u32 valid:1; /* filter allocated and valid */
195 u32 locked:1; /* filter is administratively locked */
196
197 u32 pending:1; /* filter action is pending firmware reply */
198 u32 smtidx:8; /* Source MAC Table index for smac */
199 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
200
201 /* The filter itself. Most of this is a straight copy of information
202 * provided by the extended ioctl(). Some fields are translated to
203 * internal forms -- for instance the Ingress Queue ID passed in from
204 * the ioctl() is translated into the Absolute Ingress Queue ID.
205 */
206 struct ch_filter_specification fs;
207 };
208
209 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
210 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
211 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
212
213 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
214
215 static const struct pci_device_id cxgb4_pci_tbl[] = {
216 CH_DEVICE(0xa000, 0), /* PE10K */
217 CH_DEVICE(0x4001, -1),
218 CH_DEVICE(0x4002, -1),
219 CH_DEVICE(0x4003, -1),
220 CH_DEVICE(0x4004, -1),
221 CH_DEVICE(0x4005, -1),
222 CH_DEVICE(0x4006, -1),
223 CH_DEVICE(0x4007, -1),
224 CH_DEVICE(0x4008, -1),
225 CH_DEVICE(0x4009, -1),
226 CH_DEVICE(0x400a, -1),
227 CH_DEVICE(0x400d, -1),
228 CH_DEVICE(0x400e, -1),
229 CH_DEVICE(0x4080, -1),
230 CH_DEVICE(0x4081, -1),
231 CH_DEVICE(0x4082, -1),
232 CH_DEVICE(0x4083, -1),
233 CH_DEVICE(0x4084, -1),
234 CH_DEVICE(0x4085, -1),
235 CH_DEVICE(0x4086, -1),
236 CH_DEVICE(0x4087, -1),
237 CH_DEVICE(0x4088, -1),
238 CH_DEVICE(0x4401, 4),
239 CH_DEVICE(0x4402, 4),
240 CH_DEVICE(0x4403, 4),
241 CH_DEVICE(0x4404, 4),
242 CH_DEVICE(0x4405, 4),
243 CH_DEVICE(0x4406, 4),
244 CH_DEVICE(0x4407, 4),
245 CH_DEVICE(0x4408, 4),
246 CH_DEVICE(0x4409, 4),
247 CH_DEVICE(0x440a, 4),
248 CH_DEVICE(0x440d, 4),
249 CH_DEVICE(0x440e, 4),
250 CH_DEVICE(0x4480, 4),
251 CH_DEVICE(0x4481, 4),
252 CH_DEVICE(0x4482, 4),
253 CH_DEVICE(0x4483, 4),
254 CH_DEVICE(0x4484, 4),
255 CH_DEVICE(0x4485, 4),
256 CH_DEVICE(0x4486, 4),
257 CH_DEVICE(0x4487, 4),
258 CH_DEVICE(0x4488, 4),
259 CH_DEVICE(0x5001, 4),
260 CH_DEVICE(0x5002, 4),
261 CH_DEVICE(0x5003, 4),
262 CH_DEVICE(0x5004, 4),
263 CH_DEVICE(0x5005, 4),
264 CH_DEVICE(0x5006, 4),
265 CH_DEVICE(0x5007, 4),
266 CH_DEVICE(0x5008, 4),
267 CH_DEVICE(0x5009, 4),
268 CH_DEVICE(0x500A, 4),
269 CH_DEVICE(0x500B, 4),
270 CH_DEVICE(0x500C, 4),
271 CH_DEVICE(0x500D, 4),
272 CH_DEVICE(0x500E, 4),
273 CH_DEVICE(0x500F, 4),
274 CH_DEVICE(0x5010, 4),
275 CH_DEVICE(0x5011, 4),
276 CH_DEVICE(0x5012, 4),
277 CH_DEVICE(0x5013, 4),
278 CH_DEVICE(0x5014, 4),
279 CH_DEVICE(0x5015, 4),
280 CH_DEVICE(0x5080, 4),
281 CH_DEVICE(0x5081, 4),
282 CH_DEVICE(0x5082, 4),
283 CH_DEVICE(0x5083, 4),
284 CH_DEVICE(0x5084, 4),
285 CH_DEVICE(0x5085, 4),
286 CH_DEVICE(0x5086, 4),
287 CH_DEVICE(0x5087, 4),
288 CH_DEVICE(0x5088, 4),
289 CH_DEVICE(0x5401, 4),
290 CH_DEVICE(0x5402, 4),
291 CH_DEVICE(0x5403, 4),
292 CH_DEVICE(0x5404, 4),
293 CH_DEVICE(0x5405, 4),
294 CH_DEVICE(0x5406, 4),
295 CH_DEVICE(0x5407, 4),
296 CH_DEVICE(0x5408, 4),
297 CH_DEVICE(0x5409, 4),
298 CH_DEVICE(0x540A, 4),
299 CH_DEVICE(0x540B, 4),
300 CH_DEVICE(0x540C, 4),
301 CH_DEVICE(0x540D, 4),
302 CH_DEVICE(0x540E, 4),
303 CH_DEVICE(0x540F, 4),
304 CH_DEVICE(0x5410, 4),
305 CH_DEVICE(0x5411, 4),
306 CH_DEVICE(0x5412, 4),
307 CH_DEVICE(0x5413, 4),
308 CH_DEVICE(0x5414, 4),
309 CH_DEVICE(0x5415, 4),
310 CH_DEVICE(0x5480, 4),
311 CH_DEVICE(0x5481, 4),
312 CH_DEVICE(0x5482, 4),
313 CH_DEVICE(0x5483, 4),
314 CH_DEVICE(0x5484, 4),
315 CH_DEVICE(0x5485, 4),
316 CH_DEVICE(0x5486, 4),
317 CH_DEVICE(0x5487, 4),
318 CH_DEVICE(0x5488, 4),
319 { 0, }
320 };
321
322 #define FW4_FNAME "cxgb4/t4fw.bin"
323 #define FW5_FNAME "cxgb4/t5fw.bin"
324 #define FW4_CFNAME "cxgb4/t4-config.txt"
325 #define FW5_CFNAME "cxgb4/t5-config.txt"
326
327 MODULE_DESCRIPTION(DRV_DESC);
328 MODULE_AUTHOR("Chelsio Communications");
329 MODULE_LICENSE("Dual BSD/GPL");
330 MODULE_VERSION(DRV_VERSION);
331 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
332 MODULE_FIRMWARE(FW4_FNAME);
333 MODULE_FIRMWARE(FW5_FNAME);
334
335 /*
336 * Normally we're willing to become the firmware's Master PF but will be happy
337 * if another PF has already become the Master and initialized the adapter.
338 * Setting "force_init" will cause this driver to forcibly establish itself as
339 * the Master PF and initialize the adapter.
340 */
341 static uint force_init;
342
343 module_param(force_init, uint, 0644);
344 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
345
346 /*
347 * Normally if the firmware we connect to has Configuration File support, we
348 * use that and only fall back to the old Driver-based initialization if the
349 * Configuration File fails for some reason. If force_old_init is set, then
350 * we'll always use the old Driver-based initialization sequence.
351 */
352 static uint force_old_init;
353
354 module_param(force_old_init, uint, 0644);
355 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
356
357 static int dflt_msg_enable = DFLT_MSG_ENABLE;
358
359 module_param(dflt_msg_enable, int, 0644);
360 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
361
362 /*
363 * The driver uses the best interrupt scheme available on a platform in the
364 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
365 * of these schemes the driver may consider as follows:
366 *
367 * msi = 2: choose from among all three options
368 * msi = 1: only consider MSI and INTx interrupts
369 * msi = 0: force INTx interrupts
370 */
371 static int msi = 2;
372
373 module_param(msi, int, 0644);
374 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
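/* Usage sketch (illustrative): loading the module with "modprobe cxgb4 msi=1"
 * restricts the driver to MSI/INTx, while the default of 2 also permits MSI-X.
 */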
375
376 /*
377 * Queue interrupt hold-off timer values. Queues default to the first of these
378 * upon creation.
379 */
380 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
381
382 module_param_array(intr_holdoff, uint, NULL, 0644);
383 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
384 "0..4 in microseconds");
385
386 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
387
388 module_param_array(intr_cnt, uint, NULL, 0644);
389 MODULE_PARM_DESC(intr_cnt,
390 "thresholds 1..3 for queue interrupt packet counters");
391
392 /*
393 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
394 * offset by 2 bytes in order to have the IP headers line up on 4-byte
395 * boundaries. This is a requirement for many architectures which will throw
396 * a machine check fault if an attempt is made to access one of the 4-byte IP
397 * header fields on a non-4-byte boundary. And it's a major performance issue
398 * even on some architectures which allow it, such as some implementations of the
399 * x86 ISA. However, some architectures don't mind this and for some very
400 * edge-case performance sensitive applications (like forwarding large volumes
401 * of small packets), setting this DMA offset to 0 will decrease the number of
402 * PCI-E Bus transfers enough to measurably affect performance.
403 */
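/* For example: with the standard 14-byte Ethernet header, a 2-byte pad places
 * the start of the IP header at byte offset 16 of the DMA buffer, i.e. on a
 * 4-byte boundary.
 */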
404 static int rx_dma_offset = 2;
405
406 static bool vf_acls;
407
408 #ifdef CONFIG_PCI_IOV
409 module_param(vf_acls, bool, 0644);
410 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
411
412 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
413 * on SR-IOV Capable Physical Functions.
414 */
415 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
416
417 module_param_array(num_vf, uint, NULL, 0644);
418 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
419 #endif
420
421 /* TX Queue select used to determine which algorithm to use for selecting the
422 * TX queue. Selects between the kernel-provided function (select_queue=0) and
423 * the driver's cxgb_select_queue function (select_queue=1).
424 *
425 * Default: select_queue=0
426 */
427 static int select_queue;
428 module_param(select_queue, int, 0644);
429 MODULE_PARM_DESC(select_queue,
430 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
431
432 /*
433 * The filter TCAM has a fixed portion and a variable portion. The fixed
434 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
435 * ports. The variable portion is 36 bits which can include things like Exact
436 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
437 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
438 * far exceed the 36-bit budget for this "compressed" header portion of the
439 * filter. Thus, we have a scarce resource which must be carefully managed.
440 *
441 * By default we set this up to mostly match the set of filter matching
442 * capabilities of T3 but with accommodations for some of T4's more
443 * interesting features:
444 *
445 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
446 * [Inner] VLAN (17), Port (3), FCoE (1) }
447 */
448 enum {
449 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
450 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
451 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
452 };
453
454 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
455
456 module_param(tp_vlan_pri_map, uint, 0644);
457 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
458
459 static struct dentry *cxgb4_debugfs_root;
460
461 static LIST_HEAD(adapter_list);
462 static DEFINE_MUTEX(uld_mutex);
463 /* Adapter list to be accessed from atomic context */
464 static LIST_HEAD(adap_rcu_list);
465 static DEFINE_SPINLOCK(adap_rcu_lock);
466 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
467 static const char *uld_str[] = { "RDMA", "iSCSI" };
468
469 static void link_report(struct net_device *dev)
470 {
471 if (!netif_carrier_ok(dev))
472 netdev_info(dev, "link down\n");
473 else {
474 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
475
476 const char *s = "10Mbps";
477 const struct port_info *p = netdev_priv(dev);
478
479 switch (p->link_cfg.speed) {
480 case 10000:
481 s = "10Gbps";
482 break;
483 case 1000:
484 s = "1000Mbps";
485 break;
486 case 100:
487 s = "100Mbps";
488 break;
489 case 40000:
490 s = "40Gbps";
491 break;
492 }
493
494 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
495 fc[p->link_cfg.fc]);
496 }
497 }
498
499 #ifdef CONFIG_CHELSIO_T4_DCB
500 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
501 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
502 {
503 struct port_info *pi = netdev_priv(dev);
504 struct adapter *adap = pi->adapter;
505 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
506 int i;
507
508 /* We use a simple mapping of Port TX Queue Index to DCB
509 * Priority when we're enabling DCB.
510 */
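/* Concretely: when enabling, TX Queue i is assigned DCB Priority i; when
 * disabling, the sentinel value 0xffffffff below asks the firmware to unset
 * the queue's priority.
 */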
511 for (i = 0; i < pi->nqsets; i++, txq++) {
512 u32 name, value;
513 int err;
514
515 name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
516 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
517 FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
518 value = enable ? i : 0xffffffff;
519
520 /* Since we can be called while atomic (from "interrupt
521 * level") we need to issue the Set Parameters Commannd
522 * without sleeping (timeout < 0).
523 */
524 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
525 &name, &value);
526
527 if (err)
528 dev_err(adap->pdev_dev,
529 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
530 enable ? "set" : "unset", pi->port_id, i, -err);
531 else
532 txq->dcb_prio = value;
533 }
534 }
535 #endif /* CONFIG_CHELSIO_T4_DCB */
536
537 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
538 {
539 struct net_device *dev = adapter->port[port_id];
540
541 /* Skip changes from disabled ports. */
542 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
543 if (link_stat)
544 netif_carrier_on(dev);
545 else {
546 #ifdef CONFIG_CHELSIO_T4_DCB
547 cxgb4_dcb_state_init(dev);
548 dcb_tx_queue_prio_enable(dev, false);
549 #endif /* CONFIG_CHELSIO_T4_DCB */
550 netif_carrier_off(dev);
551 }
552
553 link_report(dev);
554 }
555 }
556
557 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
558 {
559 static const char *mod_str[] = {
560 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
561 };
562
563 const struct net_device *dev = adap->port[port_id];
564 const struct port_info *pi = netdev_priv(dev);
565
566 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
567 netdev_info(dev, "port module unplugged\n");
568 else if (pi->mod_type < ARRAY_SIZE(mod_str))
569 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
570 }
571
572 /*
573 * Configure the exact and hash address filters to handle a port's multicast
574 * and secondary unicast MAC addresses.
575 */
576 static int set_addr_filters(const struct net_device *dev, bool sleep)
577 {
578 u64 mhash = 0;
579 u64 uhash = 0;
580 bool free = true;
581 u16 filt_idx[7];
582 const u8 *addr[7];
583 int ret, naddr = 0;
584 const struct netdev_hw_addr *ha;
585 int uc_cnt = netdev_uc_count(dev);
586 int mc_cnt = netdev_mc_count(dev);
587 const struct port_info *pi = netdev_priv(dev);
588 unsigned int mb = pi->adapter->fn;
589
590 /* first do the secondary unicast addresses */
591 netdev_for_each_uc_addr(ha, dev) {
592 addr[naddr++] = ha->addr;
593 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
594 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
595 naddr, addr, filt_idx, &uhash, sleep);
596 if (ret < 0)
597 return ret;
598
599 free = false;
600 naddr = 0;
601 }
602 }
603
604 /* next set up the multicast addresses */
605 netdev_for_each_mc_addr(ha, dev) {
606 addr[naddr++] = ha->addr;
607 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
608 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
609 naddr, addr, filt_idx, &mhash, sleep);
610 if (ret < 0)
611 return ret;
612
613 free = false;
614 naddr = 0;
615 }
616 }
617
618 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
619 uhash | mhash, sleep);
620 }
621
622 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
623 module_param(dbfifo_int_thresh, int, 0644);
624 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
625
626 /*
627 * usecs to sleep while draining the dbfifo
628 */
629 static int dbfifo_drain_delay = 1000;
630 module_param(dbfifo_drain_delay, int, 0644);
631 MODULE_PARM_DESC(dbfifo_drain_delay,
632 "usecs to sleep while draining the dbfifo");
633
634 /*
635 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
636 * If @mtu is -1 it is left unchanged.
637 */
638 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
639 {
640 int ret;
641 struct port_info *pi = netdev_priv(dev);
642
643 ret = set_addr_filters(dev, sleep_ok);
644 if (ret == 0)
645 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
646 (dev->flags & IFF_PROMISC) ? 1 : 0,
647 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
648 sleep_ok);
649 return ret;
650 }
651
652 /**
653 * link_start - enable a port
654 * @dev: the port to enable
655 *
656 * Performs the MAC and PHY actions needed to enable a port.
657 */
658 static int link_start(struct net_device *dev)
659 {
660 int ret;
661 struct port_info *pi = netdev_priv(dev);
662 unsigned int mb = pi->adapter->fn;
663
664 /*
665 * We do not set address filters and promiscuity here, the stack does
666 * that step explicitly.
667 */
668 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
669 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
670 if (ret == 0) {
671 ret = t4_change_mac(pi->adapter, mb, pi->viid,
672 pi->xact_addr_filt, dev->dev_addr, true,
673 true);
674 if (ret >= 0) {
675 pi->xact_addr_filt = ret;
676 ret = 0;
677 }
678 }
679 if (ret == 0)
680 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
681 &pi->link_cfg);
682 if (ret == 0) {
683 local_bh_disable();
684 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
685 true, CXGB4_DCB_ENABLED);
686 local_bh_enable();
687 }
688
689 return ret;
690 }
691
692 int cxgb4_dcb_enabled(const struct net_device *dev)
693 {
694 #ifdef CONFIG_CHELSIO_T4_DCB
695 struct port_info *pi = netdev_priv(dev);
696
697 return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
698 #else
699 return 0;
700 #endif
701 }
702 EXPORT_SYMBOL(cxgb4_dcb_enabled);
703
704 #ifdef CONFIG_CHELSIO_T4_DCB
705 /* Handle a Data Center Bridging update message from the firmware. */
706 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
707 {
708 int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
709 struct net_device *dev = adap->port[port];
710 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
711 int new_dcb_enabled;
712
713 cxgb4_dcb_handle_fw_update(adap, pcmd);
714 new_dcb_enabled = cxgb4_dcb_enabled(dev);
715
716 /* If the DCB has become enabled or disabled on the port then we're
717 * going to need to set up/tear down DCB Priority parameters for the
718 * TX Queues associated with the port.
719 */
720 if (new_dcb_enabled != old_dcb_enabled)
721 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
722 }
723 #endif /* CONFIG_CHELSIO_T4_DCB */
724
725 /* Clear a filter and release any of its resources that we own. This also
726 * clears the filter's "pending" status.
727 */
728 static void clear_filter(struct adapter *adap, struct filter_entry *f)
729 {
730 /* If the new or old filter has loopback rewriting rules then we'll
731 * need to free any existing Layer Two Table (L2T) entries of the old
732 * filter rule. The firmware will handle freeing up any Source MAC
733 * Table (SMT) entries used for rewriting Source MAC Addresses in
734 * loopback rules.
735 */
736 if (f->l2t)
737 cxgb4_l2t_release(f->l2t);
738
739 /* The zeroing of the filter rule below clears the filter valid,
740 * pending, locked flags, l2t pointer, etc. so it's all we need for
741 * this operation.
742 */
743 memset(f, 0, sizeof(*f));
744 }
745
746 /* Handle a filter write/deletion reply.
747 */
748 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
749 {
750 unsigned int idx = GET_TID(rpl);
751 unsigned int nidx = idx - adap->tids.ftid_base;
752 unsigned int ret;
753 struct filter_entry *f;
754
755 if (idx >= adap->tids.ftid_base && nidx <
756 (adap->tids.nftids + adap->tids.nsftids)) {
757 idx = nidx;
758 ret = GET_TCB_COOKIE(rpl->cookie);
759 f = &adap->tids.ftid_tab[idx];
760
761 if (ret == FW_FILTER_WR_FLT_DELETED) {
762 /* Clear the filter when we get confirmation from the
763 * hardware that the filter has been deleted.
764 */
765 clear_filter(adap, f);
766 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
767 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
768 idx);
769 clear_filter(adap, f);
770 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
771 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
772 f->pending = 0; /* asynchronous setup completed */
773 f->valid = 1;
774 } else {
775 /* Something went wrong. Issue a warning about the
776 * problem and clear everything out.
777 */
778 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
779 idx, ret);
780 clear_filter(adap, f);
781 }
782 }
783 }
784
785 /* Response queue handler for the FW event queue.
786 */
787 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
788 const struct pkt_gl *gl)
789 {
790 u8 opcode = ((const struct rss_header *)rsp)->opcode;
791
792 rsp++; /* skip RSS header */
793
794 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
795 */
796 if (unlikely(opcode == CPL_FW4_MSG &&
797 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
798 rsp++;
799 opcode = ((const struct rss_header *)rsp)->opcode;
800 rsp++;
801 if (opcode != CPL_SGE_EGR_UPDATE) {
802 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
803 opcode);
804 goto out;
805 }
806 }
807
808 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
809 const struct cpl_sge_egr_update *p = (void *)rsp;
810 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
811 struct sge_txq *txq;
812
813 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
814 txq->restarts++;
815 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
816 struct sge_eth_txq *eq;
817
818 eq = container_of(txq, struct sge_eth_txq, q);
819 netif_tx_wake_queue(eq->txq);
820 } else {
821 struct sge_ofld_txq *oq;
822
823 oq = container_of(txq, struct sge_ofld_txq, q);
824 tasklet_schedule(&oq->qresume_tsk);
825 }
826 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
827 const struct cpl_fw6_msg *p = (void *)rsp;
828
829 #ifdef CONFIG_CHELSIO_T4_DCB
830 const struct fw_port_cmd *pcmd = (const void *)p->data;
831 unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
832 unsigned int action =
833 FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
834
835 if (cmd == FW_PORT_CMD &&
836 action == FW_PORT_ACTION_GET_PORT_INFO) {
837 int port = FW_PORT_CMD_PORTID_GET(
838 be32_to_cpu(pcmd->op_to_portid));
839 struct net_device *dev = q->adap->port[port];
840 int state_input = ((pcmd->u.info.dcbxdis_pkd &
841 FW_PORT_CMD_DCBXDIS)
842 ? CXGB4_DCB_INPUT_FW_DISABLED
843 : CXGB4_DCB_INPUT_FW_ENABLED);
844
845 cxgb4_dcb_state_fsm(dev, state_input);
846 }
847
848 if (cmd == FW_PORT_CMD &&
849 action == FW_PORT_ACTION_L2_DCB_CFG)
850 dcb_rpl(q->adap, pcmd);
851 else
852 #endif
853 if (p->type == 0)
854 t4_handle_fw_rpl(q->adap, p->data);
855 } else if (opcode == CPL_L2T_WRITE_RPL) {
856 const struct cpl_l2t_write_rpl *p = (void *)rsp;
857
858 do_l2t_write_rpl(q->adap, p);
859 } else if (opcode == CPL_SET_TCB_RPL) {
860 const struct cpl_set_tcb_rpl *p = (void *)rsp;
861
862 filter_rpl(q->adap, p);
863 } else
864 dev_err(q->adap->pdev_dev,
865 "unexpected CPL %#x on FW event queue\n", opcode);
866 out:
867 return 0;
868 }
869
870 /**
871 * uldrx_handler - response queue handler for ULD queues
872 * @q: the response queue that received the packet
873 * @rsp: the response queue descriptor holding the offload message
874 * @gl: the gather list of packet fragments
875 *
876 * Deliver an ingress offload packet to a ULD. All processing is done by
877 * the ULD; we just maintain statistics.
878 */
879 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
880 const struct pkt_gl *gl)
881 {
882 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
883
884 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
885 */
886 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
887 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
888 rsp += 2;
889
890 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
891 rxq->stats.nomem++;
892 return -1;
893 }
894 if (gl == NULL)
895 rxq->stats.imm++;
896 else if (gl == CXGB4_MSG_AN)
897 rxq->stats.an++;
898 else
899 rxq->stats.pkts++;
900 return 0;
901 }
902
903 static void disable_msi(struct adapter *adapter)
904 {
905 if (adapter->flags & USING_MSIX) {
906 pci_disable_msix(adapter->pdev);
907 adapter->flags &= ~USING_MSIX;
908 } else if (adapter->flags & USING_MSI) {
909 pci_disable_msi(adapter->pdev);
910 adapter->flags &= ~USING_MSI;
911 }
912 }
913
914 /*
915 * Interrupt handler for non-data events used with MSI-X.
916 */
917 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
918 {
919 struct adapter *adap = cookie;
920
921 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
922 if (v & PFSW) {
923 adap->swintr = 1;
924 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
925 }
926 t4_slow_intr_handler(adap);
927 return IRQ_HANDLED;
928 }
929
930 /*
931 * Name the MSI-X interrupts.
932 */
933 static void name_msix_vecs(struct adapter *adap)
934 {
935 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
936
937 /* non-data interrupts */
938 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
939
940 /* FW events */
941 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
942 adap->port[0]->name);
943
944 /* Ethernet queues */
945 for_each_port(adap, j) {
946 struct net_device *d = adap->port[j];
947 const struct port_info *pi = netdev_priv(d);
948
949 for (i = 0; i < pi->nqsets; i++, msi_idx++)
950 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
951 d->name, i);
952 }
953
954 /* offload queues */
955 for_each_ofldrxq(&adap->sge, i)
956 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
957 adap->port[0]->name, i);
958
959 for_each_rdmarxq(&adap->sge, i)
960 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
961 adap->port[0]->name, i);
962
963 for_each_rdmaciq(&adap->sge, i)
964 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
965 adap->port[0]->name, i);
966 }
967
968 static int request_msix_queue_irqs(struct adapter *adap)
969 {
970 struct sge *s = &adap->sge;
971 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
972 int msi_index = 2;
973
974 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
975 adap->msix_info[1].desc, &s->fw_evtq);
976 if (err)
977 return err;
978
979 for_each_ethrxq(s, ethqidx) {
980 err = request_irq(adap->msix_info[msi_index].vec,
981 t4_sge_intr_msix, 0,
982 adap->msix_info[msi_index].desc,
983 &s->ethrxq[ethqidx].rspq);
984 if (err)
985 goto unwind;
986 msi_index++;
987 }
988 for_each_ofldrxq(s, ofldqidx) {
989 err = request_irq(adap->msix_info[msi_index].vec,
990 t4_sge_intr_msix, 0,
991 adap->msix_info[msi_index].desc,
992 &s->ofldrxq[ofldqidx].rspq);
993 if (err)
994 goto unwind;
995 msi_index++;
996 }
997 for_each_rdmarxq(s, rdmaqidx) {
998 err = request_irq(adap->msix_info[msi_index].vec,
999 t4_sge_intr_msix, 0,
1000 adap->msix_info[msi_index].desc,
1001 &s->rdmarxq[rdmaqidx].rspq);
1002 if (err)
1003 goto unwind;
1004 msi_index++;
1005 }
1006 for_each_rdmaciq(s, rdmaciqqidx) {
1007 err = request_irq(adap->msix_info[msi_index].vec,
1008 t4_sge_intr_msix, 0,
1009 adap->msix_info[msi_index].desc,
1010 &s->rdmaciq[rdmaciqqidx].rspq);
1011 if (err)
1012 goto unwind;
1013 msi_index++;
1014 }
1015 return 0;
1016
1017 unwind:
1018 while (--rdmaciqqidx >= 0)
1019 free_irq(adap->msix_info[--msi_index].vec,
1020 &s->rdmaciq[rdmaciqqidx].rspq);
1021 while (--rdmaqidx >= 0)
1022 free_irq(adap->msix_info[--msi_index].vec,
1023 &s->rdmarxq[rdmaqidx].rspq);
1024 while (--ofldqidx >= 0)
1025 free_irq(adap->msix_info[--msi_index].vec,
1026 &s->ofldrxq[ofldqidx].rspq);
1027 while (--ethqidx >= 0)
1028 free_irq(adap->msix_info[--msi_index].vec,
1029 &s->ethrxq[ethqidx].rspq);
1030 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1031 return err;
1032 }
1033
1034 static void free_msix_queue_irqs(struct adapter *adap)
1035 {
1036 int i, msi_index = 2;
1037 struct sge *s = &adap->sge;
1038
1039 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1040 for_each_ethrxq(s, i)
1041 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
1042 for_each_ofldrxq(s, i)
1043 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
1044 for_each_rdmarxq(s, i)
1045 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
1046 for_each_rdmaciq(s, i)
1047 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
1048 }
1049
1050 /**
1051 * write_rss - write the RSS table for a given port
1052 * @pi: the port
1053 * @queues: array of queue indices for RSS
1054 *
1055 * Sets up the portion of the HW RSS table for the port's VI to distribute
1056 * packets to the Rx queues in @queues.
1057 */
1058 static int write_rss(const struct port_info *pi, const u16 *queues)
1059 {
1060 u16 *rss;
1061 int i, err;
1062 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
1063
1064 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
1065 if (!rss)
1066 return -ENOMEM;
1067
1068 /* map the queue indices to queue ids */
1069 for (i = 0; i < pi->rss_size; i++, queues++)
1070 rss[i] = q[*queues].rspq.abs_id;
1071
1072 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
1073 pi->rss_size, rss, pi->rss_size);
1074 kfree(rss);
1075 return err;
1076 }
1077
1078 /**
1079 * setup_rss - configure RSS
1080 * @adap: the adapter
1081 *
1082 * Sets up RSS for each port.
1083 */
1084 static int setup_rss(struct adapter *adap)
1085 {
1086 int i, err;
1087
1088 for_each_port(adap, i) {
1089 const struct port_info *pi = adap2pinfo(adap, i);
1090
1091 err = write_rss(pi, pi->rss);
1092 if (err)
1093 return err;
1094 }
1095 return 0;
1096 }
1097
1098 /*
1099 * Return the channel of the ingress queue with the given qid.
1100 */
1101 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1102 {
1103 qid -= p->ingr_start;
1104 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1105 }
1106
1107 /*
1108 * Wait until all NAPI handlers are descheduled.
1109 */
1110 static void quiesce_rx(struct adapter *adap)
1111 {
1112 int i;
1113
1114 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1115 struct sge_rspq *q = adap->sge.ingr_map[i];
1116
1117 if (q && q->handler)
1118 napi_disable(&q->napi);
1119 }
1120 }
1121
1122 /*
1123 * Enable NAPI scheduling and interrupt generation for all Rx queues.
1124 */
1125 static void enable_rx(struct adapter *adap)
1126 {
1127 int i;
1128
1129 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1130 struct sge_rspq *q = adap->sge.ingr_map[i];
1131
1132 if (!q)
1133 continue;
1134 if (q->handler)
1135 napi_enable(&q->napi);
1136 /* 0-increment GTS to start the timer and enable interrupts */
1137 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1138 SEINTARM(q->intr_params) |
1139 INGRESSQID(q->cntxt_id));
1140 }
1141 }
1142
1143 /**
1144 * setup_sge_queues - configure SGE Tx/Rx/response queues
1145 * @adap: the adapter
1146 *
1147 * Determines how many sets of SGE queues to use and initializes them.
1148 * We support multiple queue sets per port if we have MSI-X, otherwise
1149 * just one queue set per port.
1150 */
1151 static int setup_sge_queues(struct adapter *adap)
1152 {
1153 int err, msi_idx, i, j;
1154 struct sge *s = &adap->sge;
1155
1156 bitmap_zero(s->starving_fl, MAX_EGRQ);
1157 bitmap_zero(s->txq_maperr, MAX_EGRQ);
1158
1159 if (adap->flags & USING_MSIX)
1160 msi_idx = 1; /* vector 0 is for non-queue interrupts */
1161 else {
1162 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1163 NULL, NULL);
1164 if (err)
1165 return err;
1166 msi_idx = -((int)s->intrq.abs_id + 1);
1167 }
1168
1169 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1170 msi_idx, NULL, fwevtq_handler);
1171 if (err) {
1172 freeout: t4_free_sge_resources(adap);
1173 return err;
1174 }
1175
1176 for_each_port(adap, i) {
1177 struct net_device *dev = adap->port[i];
1178 struct port_info *pi = netdev_priv(dev);
1179 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1180 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1181
1182 for (j = 0; j < pi->nqsets; j++, q++) {
1183 if (msi_idx > 0)
1184 msi_idx++;
1185 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1186 msi_idx, &q->fl,
1187 t4_ethrx_handler);
1188 if (err)
1189 goto freeout;
1190 q->rspq.idx = j;
1191 memset(&q->stats, 0, sizeof(q->stats));
1192 }
1193 for (j = 0; j < pi->nqsets; j++, t++) {
1194 err = t4_sge_alloc_eth_txq(adap, t, dev,
1195 netdev_get_tx_queue(dev, j),
1196 s->fw_evtq.cntxt_id);
1197 if (err)
1198 goto freeout;
1199 }
1200 }
1201
1202 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1203 for_each_ofldrxq(s, i) {
1204 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1205 struct net_device *dev = adap->port[i / j];
1206
1207 if (msi_idx > 0)
1208 msi_idx++;
1209 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1210 q->fl.size ? &q->fl : NULL,
1211 uldrx_handler);
1212 if (err)
1213 goto freeout;
1214 memset(&q->stats, 0, sizeof(q->stats));
1215 s->ofld_rxq[i] = q->rspq.abs_id;
1216 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1217 s->fw_evtq.cntxt_id);
1218 if (err)
1219 goto freeout;
1220 }
1221
1222 for_each_rdmarxq(s, i) {
1223 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1224
1225 if (msi_idx > 0)
1226 msi_idx++;
1227 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1228 msi_idx, q->fl.size ? &q->fl : NULL,
1229 uldrx_handler);
1230 if (err)
1231 goto freeout;
1232 memset(&q->stats, 0, sizeof(q->stats));
1233 s->rdma_rxq[i] = q->rspq.abs_id;
1234 }
1235
1236 for_each_rdmaciq(s, i) {
1237 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1238
1239 if (msi_idx > 0)
1240 msi_idx++;
1241 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1242 msi_idx, q->fl.size ? &q->fl : NULL,
1243 uldrx_handler);
1244 if (err)
1245 goto freeout;
1246 memset(&q->stats, 0, sizeof(q->stats));
1247 s->rdma_ciq[i] = q->rspq.abs_id;
1248 }
1249
1250 for_each_port(adap, i) {
1251 /*
1252 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1253 * have RDMA queues, and that's the right value.
1254 */
1255 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1256 s->fw_evtq.cntxt_id,
1257 s->rdmarxq[i].rspq.cntxt_id);
1258 if (err)
1259 goto freeout;
1260 }
1261
1262 t4_write_reg(adap, is_t4(adap->params.chip) ?
1263 MPS_TRC_RSS_CONTROL :
1264 MPS_T5_TRC_RSS_CONTROL,
1265 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1266 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1267 return 0;
1268 }
1269
1270 /*
1271 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1272 * The allocated memory is cleared.
1273 */
1274 void *t4_alloc_mem(size_t size)
1275 {
1276 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1277
1278 if (!p)
1279 p = vzalloc(size);
1280 return p;
1281 }
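/* Usage sketch (illustrative sizes/names): a large table could be obtained
 * with t4_alloc_mem(nentries * sizeof(struct filter_entry)) and later
 * released with t4_free_mem() below, which picks vfree() or kfree() based on
 * how the memory was actually allocated.
 */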
1282
1283 /*
1284 * Free memory allocated through alloc_mem().
1285 */
1286 static void t4_free_mem(void *addr)
1287 {
1288 if (is_vmalloc_addr(addr))
1289 vfree(addr);
1290 else
1291 kfree(addr);
1292 }
1293
1294 /* Send a Work Request to write the filter at a specified index. We construct
1295 * a Firmware Filter Work Request to have the work done and put the indicated
1296 * filter into "pending" mode which will prevent any further actions against
1297 * it till we get a reply from the firmware on the completion status of the
1298 * request.
1299 */
1300 static int set_filter_wr(struct adapter *adapter, int fidx)
1301 {
1302 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1303 struct sk_buff *skb;
1304 struct fw_filter_wr *fwr;
1305 unsigned int ftid;
1306
1307 /* If the new filter requires loopback Destination MAC and/or VLAN
1308 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1309 * the filter.
1310 */
1311 if (f->fs.newdmac || f->fs.newvlan) {
1312 /* allocate L2T entry for new filter */
1313 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1314 if (f->l2t == NULL)
1315 return -EAGAIN;
1316 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1317 f->fs.eport, f->fs.dmac)) {
1318 cxgb4_l2t_release(f->l2t);
1319 f->l2t = NULL;
1320 return -ENOMEM;
1321 }
1322 }
1323
1324 ftid = adapter->tids.ftid_base + fidx;
1325
1326 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1327 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1328 memset(fwr, 0, sizeof(*fwr));
1329
1330 /* It would be nice to put most of the following in t4_hw.c but most
1331 * of the work is translating the cxgbtool ch_filter_specification
1332 * into the Work Request and the definition of that structure is
1333 * currently in cxgbtool.h which isn't appropriate to pull into the
1334 * common code. We may eventually try to come up with a more neutral
1335 * filter specification structure but for now it's easiest to simply
1336 * put this fairly direct code in line ...
1337 */
1338 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1339 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1340 fwr->tid_to_iq =
1341 htonl(V_FW_FILTER_WR_TID(ftid) |
1342 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1343 V_FW_FILTER_WR_NOREPLY(0) |
1344 V_FW_FILTER_WR_IQ(f->fs.iq));
1345 fwr->del_filter_to_l2tix =
1346 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1347 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1348 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1349 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1350 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1351 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1352 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1353 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1354 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1355 f->fs.newvlan == VLAN_REWRITE) |
1356 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1357 f->fs.newvlan == VLAN_REWRITE) |
1358 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1359 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1360 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1361 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1362 fwr->ethtype = htons(f->fs.val.ethtype);
1363 fwr->ethtypem = htons(f->fs.mask.ethtype);
1364 fwr->frag_to_ovlan_vldm =
1365 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1366 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1367 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1368 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1369 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1370 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1371 fwr->smac_sel = 0;
1372 fwr->rx_chan_rx_rpl_iq =
1373 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1374 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1375 fwr->maci_to_matchtypem =
1376 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1377 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1378 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1379 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1380 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1381 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1382 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1383 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1384 fwr->ptcl = f->fs.val.proto;
1385 fwr->ptclm = f->fs.mask.proto;
1386 fwr->ttyp = f->fs.val.tos;
1387 fwr->ttypm = f->fs.mask.tos;
1388 fwr->ivlan = htons(f->fs.val.ivlan);
1389 fwr->ivlanm = htons(f->fs.mask.ivlan);
1390 fwr->ovlan = htons(f->fs.val.ovlan);
1391 fwr->ovlanm = htons(f->fs.mask.ovlan);
1392 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1393 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1394 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1395 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1396 fwr->lp = htons(f->fs.val.lport);
1397 fwr->lpm = htons(f->fs.mask.lport);
1398 fwr->fp = htons(f->fs.val.fport);
1399 fwr->fpm = htons(f->fs.mask.fport);
1400 if (f->fs.newsmac)
1401 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1402
1403 /* Mark the filter as "pending" and ship off the Filter Work Request.
1404 * When we get the Work Request Reply we'll clear the pending status.
1405 */
1406 f->pending = 1;
1407 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1408 t4_ofld_send(adapter, skb);
1409 return 0;
1410 }
1411
1412 /* Delete the filter at a specified index.
1413 */
1414 static int del_filter_wr(struct adapter *adapter, int fidx)
1415 {
1416 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1417 struct sk_buff *skb;
1418 struct fw_filter_wr *fwr;
1419 unsigned int len, ftid;
1420
1421 len = sizeof(*fwr);
1422 ftid = adapter->tids.ftid_base + fidx;
1423
1424 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1425 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1426 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1427
1428 /* Mark the filter as "pending" and ship off the Filter Work Request.
1429 * When we get the Work Request Reply we'll clear the pending status.
1430 */
1431 f->pending = 1;
1432 t4_mgmt_tx(adapter, skb);
1433 return 0;
1434 }
1435
1436 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1437 void *accel_priv, select_queue_fallback_t fallback)
1438 {
1439 int txq;
1440
1441 #ifdef CONFIG_CHELSIO_T4_DCB
1442 /* If Data Center Bridging has been successfully negotiated on this
1443 * link then we'll use the skb's priority to map it to a TX Queue.
1444 * The skb's priority is determined via the VLAN Tag Priority Code
1445 * Point field.
1446 */
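/* For example, a frame whose VLAN tag carries Priority Code Point 5 is
 * steered to TX queue 5 by the priority extraction below.
 */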
1447 if (cxgb4_dcb_enabled(dev)) {
1448 u16 vlan_tci;
1449 int err;
1450
1451 err = vlan_get_tag(skb, &vlan_tci);
1452 if (unlikely(err)) {
1453 if (net_ratelimit())
1454 netdev_warn(dev,
1455 "TX Packet without VLAN Tag on DCB Link\n");
1456 txq = 0;
1457 } else {
1458 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1459 }
1460 return txq;
1461 }
1462 #endif /* CONFIG_CHELSIO_T4_DCB */
1463
1464 if (select_queue) {
1465 txq = (skb_rx_queue_recorded(skb)
1466 ? skb_get_rx_queue(skb)
1467 : smp_processor_id());
1468
1469 while (unlikely(txq >= dev->real_num_tx_queues))
1470 txq -= dev->real_num_tx_queues;
1471
1472 return txq;
1473 }
1474
1475 return fallback(dev, skb) % dev->real_num_tx_queues;
1476 }
1477
1478 static inline int is_offload(const struct adapter *adap)
1479 {
1480 return adap->params.offload;
1481 }
1482
1483 /*
1484 * Implementation of ethtool operations.
1485 */
1486
1487 static u32 get_msglevel(struct net_device *dev)
1488 {
1489 return netdev2adap(dev)->msg_enable;
1490 }
1491
1492 static void set_msglevel(struct net_device *dev, u32 val)
1493 {
1494 netdev2adap(dev)->msg_enable = val;
1495 }
1496
1497 static char stats_strings[][ETH_GSTRING_LEN] = {
1498 "TxOctetsOK ",
1499 "TxFramesOK ",
1500 "TxBroadcastFrames ",
1501 "TxMulticastFrames ",
1502 "TxUnicastFrames ",
1503 "TxErrorFrames ",
1504
1505 "TxFrames64 ",
1506 "TxFrames65To127 ",
1507 "TxFrames128To255 ",
1508 "TxFrames256To511 ",
1509 "TxFrames512To1023 ",
1510 "TxFrames1024To1518 ",
1511 "TxFrames1519ToMax ",
1512
1513 "TxFramesDropped ",
1514 "TxPauseFrames ",
1515 "TxPPP0Frames ",
1516 "TxPPP1Frames ",
1517 "TxPPP2Frames ",
1518 "TxPPP3Frames ",
1519 "TxPPP4Frames ",
1520 "TxPPP5Frames ",
1521 "TxPPP6Frames ",
1522 "TxPPP7Frames ",
1523
1524 "RxOctetsOK ",
1525 "RxFramesOK ",
1526 "RxBroadcastFrames ",
1527 "RxMulticastFrames ",
1528 "RxUnicastFrames ",
1529
1530 "RxFramesTooLong ",
1531 "RxJabberErrors ",
1532 "RxFCSErrors ",
1533 "RxLengthErrors ",
1534 "RxSymbolErrors ",
1535 "RxRuntFrames ",
1536
1537 "RxFrames64 ",
1538 "RxFrames65To127 ",
1539 "RxFrames128To255 ",
1540 "RxFrames256To511 ",
1541 "RxFrames512To1023 ",
1542 "RxFrames1024To1518 ",
1543 "RxFrames1519ToMax ",
1544
1545 "RxPauseFrames ",
1546 "RxPPP0Frames ",
1547 "RxPPP1Frames ",
1548 "RxPPP2Frames ",
1549 "RxPPP3Frames ",
1550 "RxPPP4Frames ",
1551 "RxPPP5Frames ",
1552 "RxPPP6Frames ",
1553 "RxPPP7Frames ",
1554
1555 "RxBG0FramesDropped ",
1556 "RxBG1FramesDropped ",
1557 "RxBG2FramesDropped ",
1558 "RxBG3FramesDropped ",
1559 "RxBG0FramesTrunc ",
1560 "RxBG1FramesTrunc ",
1561 "RxBG2FramesTrunc ",
1562 "RxBG3FramesTrunc ",
1563
1564 "TSO ",
1565 "TxCsumOffload ",
1566 "RxCsumGood ",
1567 "VLANextractions ",
1568 "VLANinsertions ",
1569 "GROpackets ",
1570 "GROmerged ",
1571 "WriteCoalSuccess ",
1572 "WriteCoalFail ",
1573 };
1574
1575 static int get_sset_count(struct net_device *dev, int sset)
1576 {
1577 switch (sset) {
1578 case ETH_SS_STATS:
1579 return ARRAY_SIZE(stats_strings);
1580 default:
1581 return -EOPNOTSUPP;
1582 }
1583 }
1584
1585 #define T4_REGMAP_SIZE (160 * 1024)
1586 #define T5_REGMAP_SIZE (332 * 1024)
1587
1588 static int get_regs_len(struct net_device *dev)
1589 {
1590 struct adapter *adap = netdev2adap(dev);
1591 if (is_t4(adap->params.chip))
1592 return T4_REGMAP_SIZE;
1593 else
1594 return T5_REGMAP_SIZE;
1595 }
1596
1597 static int get_eeprom_len(struct net_device *dev)
1598 {
1599 return EEPROMSIZE;
1600 }
1601
1602 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1603 {
1604 struct adapter *adapter = netdev2adap(dev);
1605
1606 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1607 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1608 strlcpy(info->bus_info, pci_name(adapter->pdev),
1609 sizeof(info->bus_info));
1610
1611 if (adapter->params.fw_vers)
1612 snprintf(info->fw_version, sizeof(info->fw_version),
1613 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1614 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1615 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1616 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1617 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1618 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1619 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1620 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1621 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1622 }
1623
1624 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1625 {
1626 if (stringset == ETH_SS_STATS)
1627 memcpy(data, stats_strings, sizeof(stats_strings));
1628 }
1629
1630 /*
1631 * port stats maintained per queue of the port. They should be in the same
1632 * order as in stats_strings above.
1633 */
1634 struct queue_port_stats {
1635 u64 tso;
1636 u64 tx_csum;
1637 u64 rx_csum;
1638 u64 vlan_ex;
1639 u64 vlan_ins;
1640 u64 gro_pkts;
1641 u64 gro_merged;
1642 };
1643
1644 static void collect_sge_port_stats(const struct adapter *adap,
1645 const struct port_info *p, struct queue_port_stats *s)
1646 {
1647 int i;
1648 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1649 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1650
1651 memset(s, 0, sizeof(*s));
1652 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1653 s->tso += tx->tso;
1654 s->tx_csum += tx->tx_cso;
1655 s->rx_csum += rx->stats.rx_cso;
1656 s->vlan_ex += rx->stats.vlan_ex;
1657 s->vlan_ins += tx->vlan_ins;
1658 s->gro_pkts += rx->stats.lro_pkts;
1659 s->gro_merged += rx->stats.lro_merged;
1660 }
1661 }
1662
1663 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1664 u64 *data)
1665 {
1666 struct port_info *pi = netdev_priv(dev);
1667 struct adapter *adapter = pi->adapter;
1668 u32 val1, val2;
1669
1670 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1671
1672 data += sizeof(struct port_stats) / sizeof(u64);
1673 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1674 data += sizeof(struct queue_port_stats) / sizeof(u64);
1675 if (!is_t4(adapter->params.chip)) {
1676 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1677 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1678 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1679 *data = val1 - val2;
1680 data++;
1681 *data = val2;
1682 data++;
1683 } else {
1684 memset(data, 0, 2 * sizeof(u64));
1685 data += 2; /* advance past the two (zeroed) write-coalescing counters */
1686 }
1687 }
1688
1689 /*
1690 * Return a version number to identify the type of adapter. The scheme is:
1691 * - bits 0..9: chip version
1692 * - bits 10..15: chip revision
1693 * - bits 16..23: register dump version
1694 */
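/* For example (illustrative values): chip version 5, chip revision 1 and
 * register dump version 1 encode as 5 | (1 << 10) | (1 << 16) = 0x10405.
 */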
1695 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1696 {
1697 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1698 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1699 }
1700
1701 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1702 unsigned int end)
1703 {
1704 u32 *p = buf + start;
1705
1706 for ( ; start <= end; start += sizeof(u32))
1707 *p++ = t4_read_reg(ap, start);
1708 }
1709
1710 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1711 void *buf)
1712 {
1713 static const unsigned int t4_reg_ranges[] = {
1714 0x1008, 0x1108,
1715 0x1180, 0x11b4,
1716 0x11fc, 0x123c,
1717 0x1300, 0x173c,
1718 0x1800, 0x18fc,
1719 0x3000, 0x30d8,
1720 0x30e0, 0x5924,
1721 0x5960, 0x59d4,
1722 0x5a00, 0x5af8,
1723 0x6000, 0x6098,
1724 0x6100, 0x6150,
1725 0x6200, 0x6208,
1726 0x6240, 0x6248,
1727 0x6280, 0x6338,
1728 0x6370, 0x638c,
1729 0x6400, 0x643c,
1730 0x6500, 0x6524,
1731 0x6a00, 0x6a38,
1732 0x6a60, 0x6a78,
1733 0x6b00, 0x6b84,
1734 0x6bf0, 0x6c84,
1735 0x6cf0, 0x6d84,
1736 0x6df0, 0x6e84,
1737 0x6ef0, 0x6f84,
1738 0x6ff0, 0x7084,
1739 0x70f0, 0x7184,
1740 0x71f0, 0x7284,
1741 0x72f0, 0x7384,
1742 0x73f0, 0x7450,
1743 0x7500, 0x7530,
1744 0x7600, 0x761c,
1745 0x7680, 0x76cc,
1746 0x7700, 0x7798,
1747 0x77c0, 0x77fc,
1748 0x7900, 0x79fc,
1749 0x7b00, 0x7c38,
1750 0x7d00, 0x7efc,
1751 0x8dc0, 0x8e1c,
1752 0x8e30, 0x8e78,
1753 0x8ea0, 0x8f6c,
1754 0x8fc0, 0x9074,
1755 0x90fc, 0x90fc,
1756 0x9400, 0x9458,
1757 0x9600, 0x96bc,
1758 0x9800, 0x9808,
1759 0x9820, 0x983c,
1760 0x9850, 0x9864,
1761 0x9c00, 0x9c6c,
1762 0x9c80, 0x9cec,
1763 0x9d00, 0x9d6c,
1764 0x9d80, 0x9dec,
1765 0x9e00, 0x9e6c,
1766 0x9e80, 0x9eec,
1767 0x9f00, 0x9f6c,
1768 0x9f80, 0x9fec,
1769 0xd004, 0xd03c,
1770 0xdfc0, 0xdfe0,
1771 0xe000, 0xea7c,
1772 0xf000, 0x11110,
1773 0x11118, 0x11190,
1774 0x19040, 0x1906c,
1775 0x19078, 0x19080,
1776 0x1908c, 0x19124,
1777 0x19150, 0x191b0,
1778 0x191d0, 0x191e8,
1779 0x19238, 0x1924c,
1780 0x193f8, 0x19474,
1781 0x19490, 0x194f8,
1782 0x19800, 0x19f30,
1783 0x1a000, 0x1a06c,
1784 0x1a0b0, 0x1a120,
1785 0x1a128, 0x1a138,
1786 0x1a190, 0x1a1c4,
1787 0x1a1fc, 0x1a1fc,
1788 0x1e040, 0x1e04c,
1789 0x1e284, 0x1e28c,
1790 0x1e2c0, 0x1e2c0,
1791 0x1e2e0, 0x1e2e0,
1792 0x1e300, 0x1e384,
1793 0x1e3c0, 0x1e3c8,
1794 0x1e440, 0x1e44c,
1795 0x1e684, 0x1e68c,
1796 0x1e6c0, 0x1e6c0,
1797 0x1e6e0, 0x1e6e0,
1798 0x1e700, 0x1e784,
1799 0x1e7c0, 0x1e7c8,
1800 0x1e840, 0x1e84c,
1801 0x1ea84, 0x1ea8c,
1802 0x1eac0, 0x1eac0,
1803 0x1eae0, 0x1eae0,
1804 0x1eb00, 0x1eb84,
1805 0x1ebc0, 0x1ebc8,
1806 0x1ec40, 0x1ec4c,
1807 0x1ee84, 0x1ee8c,
1808 0x1eec0, 0x1eec0,
1809 0x1eee0, 0x1eee0,
1810 0x1ef00, 0x1ef84,
1811 0x1efc0, 0x1efc8,
1812 0x1f040, 0x1f04c,
1813 0x1f284, 0x1f28c,
1814 0x1f2c0, 0x1f2c0,
1815 0x1f2e0, 0x1f2e0,
1816 0x1f300, 0x1f384,
1817 0x1f3c0, 0x1f3c8,
1818 0x1f440, 0x1f44c,
1819 0x1f684, 0x1f68c,
1820 0x1f6c0, 0x1f6c0,
1821 0x1f6e0, 0x1f6e0,
1822 0x1f700, 0x1f784,
1823 0x1f7c0, 0x1f7c8,
1824 0x1f840, 0x1f84c,
1825 0x1fa84, 0x1fa8c,
1826 0x1fac0, 0x1fac0,
1827 0x1fae0, 0x1fae0,
1828 0x1fb00, 0x1fb84,
1829 0x1fbc0, 0x1fbc8,
1830 0x1fc40, 0x1fc4c,
1831 0x1fe84, 0x1fe8c,
1832 0x1fec0, 0x1fec0,
1833 0x1fee0, 0x1fee0,
1834 0x1ff00, 0x1ff84,
1835 0x1ffc0, 0x1ffc8,
1836 0x20000, 0x2002c,
1837 0x20100, 0x2013c,
1838 0x20190, 0x201c8,
1839 0x20200, 0x20318,
1840 0x20400, 0x20528,
1841 0x20540, 0x20614,
1842 0x21000, 0x21040,
1843 0x2104c, 0x21060,
1844 0x210c0, 0x210ec,
1845 0x21200, 0x21268,
1846 0x21270, 0x21284,
1847 0x212fc, 0x21388,
1848 0x21400, 0x21404,
1849 0x21500, 0x21518,
1850 0x2152c, 0x2153c,
1851 0x21550, 0x21554,
1852 0x21600, 0x21600,
1853 0x21608, 0x21628,
1854 0x21630, 0x2163c,
1855 0x21700, 0x2171c,
1856 0x21780, 0x2178c,
1857 0x21800, 0x21c38,
1858 0x21c80, 0x21d7c,
1859 0x21e00, 0x21e04,
1860 0x22000, 0x2202c,
1861 0x22100, 0x2213c,
1862 0x22190, 0x221c8,
1863 0x22200, 0x22318,
1864 0x22400, 0x22528,
1865 0x22540, 0x22614,
1866 0x23000, 0x23040,
1867 0x2304c, 0x23060,
1868 0x230c0, 0x230ec,
1869 0x23200, 0x23268,
1870 0x23270, 0x23284,
1871 0x232fc, 0x23388,
1872 0x23400, 0x23404,
1873 0x23500, 0x23518,
1874 0x2352c, 0x2353c,
1875 0x23550, 0x23554,
1876 0x23600, 0x23600,
1877 0x23608, 0x23628,
1878 0x23630, 0x2363c,
1879 0x23700, 0x2371c,
1880 0x23780, 0x2378c,
1881 0x23800, 0x23c38,
1882 0x23c80, 0x23d7c,
1883 0x23e00, 0x23e04,
1884 0x24000, 0x2402c,
1885 0x24100, 0x2413c,
1886 0x24190, 0x241c8,
1887 0x24200, 0x24318,
1888 0x24400, 0x24528,
1889 0x24540, 0x24614,
1890 0x25000, 0x25040,
1891 0x2504c, 0x25060,
1892 0x250c0, 0x250ec,
1893 0x25200, 0x25268,
1894 0x25270, 0x25284,
1895 0x252fc, 0x25388,
1896 0x25400, 0x25404,
1897 0x25500, 0x25518,
1898 0x2552c, 0x2553c,
1899 0x25550, 0x25554,
1900 0x25600, 0x25600,
1901 0x25608, 0x25628,
1902 0x25630, 0x2563c,
1903 0x25700, 0x2571c,
1904 0x25780, 0x2578c,
1905 0x25800, 0x25c38,
1906 0x25c80, 0x25d7c,
1907 0x25e00, 0x25e04,
1908 0x26000, 0x2602c,
1909 0x26100, 0x2613c,
1910 0x26190, 0x261c8,
1911 0x26200, 0x26318,
1912 0x26400, 0x26528,
1913 0x26540, 0x26614,
1914 0x27000, 0x27040,
1915 0x2704c, 0x27060,
1916 0x270c0, 0x270ec,
1917 0x27200, 0x27268,
1918 0x27270, 0x27284,
1919 0x272fc, 0x27388,
1920 0x27400, 0x27404,
1921 0x27500, 0x27518,
1922 0x2752c, 0x2753c,
1923 0x27550, 0x27554,
1924 0x27600, 0x27600,
1925 0x27608, 0x27628,
1926 0x27630, 0x2763c,
1927 0x27700, 0x2771c,
1928 0x27780, 0x2778c,
1929 0x27800, 0x27c38,
1930 0x27c80, 0x27d7c,
1931 0x27e00, 0x27e04
1932 };
1933
1934 static const unsigned int t5_reg_ranges[] = {
1935 0x1008, 0x1148,
1936 0x1180, 0x11b4,
1937 0x11fc, 0x123c,
1938 0x1280, 0x173c,
1939 0x1800, 0x18fc,
1940 0x3000, 0x3028,
1941 0x3060, 0x30d8,
1942 0x30e0, 0x30fc,
1943 0x3140, 0x357c,
1944 0x35a8, 0x35cc,
1945 0x35ec, 0x35ec,
1946 0x3600, 0x5624,
1947 0x56cc, 0x575c,
1948 0x580c, 0x5814,
1949 0x5890, 0x58bc,
1950 0x5940, 0x59dc,
1951 0x59fc, 0x5a18,
1952 0x5a60, 0x5a9c,
1953 0x5b9c, 0x5bfc,
1954 0x6000, 0x6040,
1955 0x6058, 0x614c,
1956 0x7700, 0x7798,
1957 0x77c0, 0x78fc,
1958 0x7b00, 0x7c54,
1959 0x7d00, 0x7efc,
1960 0x8dc0, 0x8de0,
1961 0x8df8, 0x8e84,
1962 0x8ea0, 0x8f84,
1963 0x8fc0, 0x90f8,
1964 0x9400, 0x9470,
1965 0x9600, 0x96f4,
1966 0x9800, 0x9808,
1967 0x9820, 0x983c,
1968 0x9850, 0x9864,
1969 0x9c00, 0x9c6c,
1970 0x9c80, 0x9cec,
1971 0x9d00, 0x9d6c,
1972 0x9d80, 0x9dec,
1973 0x9e00, 0x9e6c,
1974 0x9e80, 0x9eec,
1975 0x9f00, 0x9f6c,
1976 0x9f80, 0xa020,
1977 0xd004, 0xd03c,
1978 0xdfc0, 0xdfe0,
1979 0xe000, 0x11088,
1980 0x1109c, 0x11110,
1981 0x11118, 0x1117c,
1982 0x11190, 0x11204,
1983 0x19040, 0x1906c,
1984 0x19078, 0x19080,
1985 0x1908c, 0x19124,
1986 0x19150, 0x191b0,
1987 0x191d0, 0x191e8,
1988 0x19238, 0x19290,
1989 0x193f8, 0x19474,
1990 0x19490, 0x194cc,
1991 0x194f0, 0x194f8,
1992 0x19c00, 0x19c60,
1993 0x19c94, 0x19e10,
1994 0x19e50, 0x19f34,
1995 0x19f40, 0x19f50,
1996 0x19f90, 0x19fe4,
1997 0x1a000, 0x1a06c,
1998 0x1a0b0, 0x1a120,
1999 0x1a128, 0x1a138,
2000 0x1a190, 0x1a1c4,
2001 0x1a1fc, 0x1a1fc,
2002 0x1e008, 0x1e00c,
2003 0x1e040, 0x1e04c,
2004 0x1e284, 0x1e290,
2005 0x1e2c0, 0x1e2c0,
2006 0x1e2e0, 0x1e2e0,
2007 0x1e300, 0x1e384,
2008 0x1e3c0, 0x1e3c8,
2009 0x1e408, 0x1e40c,
2010 0x1e440, 0x1e44c,
2011 0x1e684, 0x1e690,
2012 0x1e6c0, 0x1e6c0,
2013 0x1e6e0, 0x1e6e0,
2014 0x1e700, 0x1e784,
2015 0x1e7c0, 0x1e7c8,
2016 0x1e808, 0x1e80c,
2017 0x1e840, 0x1e84c,
2018 0x1ea84, 0x1ea90,
2019 0x1eac0, 0x1eac0,
2020 0x1eae0, 0x1eae0,
2021 0x1eb00, 0x1eb84,
2022 0x1ebc0, 0x1ebc8,
2023 0x1ec08, 0x1ec0c,
2024 0x1ec40, 0x1ec4c,
2025 0x1ee84, 0x1ee90,
2026 0x1eec0, 0x1eec0,
2027 0x1eee0, 0x1eee0,
2028 0x1ef00, 0x1ef84,
2029 0x1efc0, 0x1efc8,
2030 0x1f008, 0x1f00c,
2031 0x1f040, 0x1f04c,
2032 0x1f284, 0x1f290,
2033 0x1f2c0, 0x1f2c0,
2034 0x1f2e0, 0x1f2e0,
2035 0x1f300, 0x1f384,
2036 0x1f3c0, 0x1f3c8,
2037 0x1f408, 0x1f40c,
2038 0x1f440, 0x1f44c,
2039 0x1f684, 0x1f690,
2040 0x1f6c0, 0x1f6c0,
2041 0x1f6e0, 0x1f6e0,
2042 0x1f700, 0x1f784,
2043 0x1f7c0, 0x1f7c8,
2044 0x1f808, 0x1f80c,
2045 0x1f840, 0x1f84c,
2046 0x1fa84, 0x1fa90,
2047 0x1fac0, 0x1fac0,
2048 0x1fae0, 0x1fae0,
2049 0x1fb00, 0x1fb84,
2050 0x1fbc0, 0x1fbc8,
2051 0x1fc08, 0x1fc0c,
2052 0x1fc40, 0x1fc4c,
2053 0x1fe84, 0x1fe90,
2054 0x1fec0, 0x1fec0,
2055 0x1fee0, 0x1fee0,
2056 0x1ff00, 0x1ff84,
2057 0x1ffc0, 0x1ffc8,
2058 0x30000, 0x30030,
2059 0x30100, 0x30144,
2060 0x30190, 0x301d0,
2061 0x30200, 0x30318,
2062 0x30400, 0x3052c,
2063 0x30540, 0x3061c,
2064 0x30800, 0x30834,
2065 0x308c0, 0x30908,
2066 0x30910, 0x309ac,
2067 0x30a00, 0x30a04,
2068 0x30a0c, 0x30a2c,
2069 0x30a44, 0x30a50,
2070 0x30a74, 0x30c24,
2071 0x30d08, 0x30d14,
2072 0x30d1c, 0x30d20,
2073 0x30d3c, 0x30d50,
2074 0x31200, 0x3120c,
2075 0x31220, 0x31220,
2076 0x31240, 0x31240,
2077 0x31600, 0x31600,
2078 0x31608, 0x3160c,
2079 0x31a00, 0x31a1c,
2080 0x31e04, 0x31e20,
2081 0x31e38, 0x31e3c,
2082 0x31e80, 0x31e80,
2083 0x31e88, 0x31ea8,
2084 0x31eb0, 0x31eb4,
2085 0x31ec8, 0x31ed4,
2086 0x31fb8, 0x32004,
2087 0x32208, 0x3223c,
2088 0x32600, 0x32630,
2089 0x32a00, 0x32abc,
2090 0x32b00, 0x32b70,
2091 0x33000, 0x33048,
2092 0x33060, 0x3309c,
2093 0x330f0, 0x33148,
2094 0x33160, 0x3319c,
2095 0x331f0, 0x332e4,
2096 0x332f8, 0x333e4,
2097 0x333f8, 0x33448,
2098 0x33460, 0x3349c,
2099 0x334f0, 0x33548,
2100 0x33560, 0x3359c,
2101 0x335f0, 0x336e4,
2102 0x336f8, 0x337e4,
2103 0x337f8, 0x337fc,
2104 0x33814, 0x33814,
2105 0x3382c, 0x3382c,
2106 0x33880, 0x3388c,
2107 0x338e8, 0x338ec,
2108 0x33900, 0x33948,
2109 0x33960, 0x3399c,
2110 0x339f0, 0x33ae4,
2111 0x33af8, 0x33b10,
2112 0x33b28, 0x33b28,
2113 0x33b3c, 0x33b50,
2114 0x33bf0, 0x33c10,
2115 0x33c28, 0x33c28,
2116 0x33c3c, 0x33c50,
2117 0x33cf0, 0x33cfc,
2118 0x34000, 0x34030,
2119 0x34100, 0x34144,
2120 0x34190, 0x341d0,
2121 0x34200, 0x34318,
2122 0x34400, 0x3452c,
2123 0x34540, 0x3461c,
2124 0x34800, 0x34834,
2125 0x348c0, 0x34908,
2126 0x34910, 0x349ac,
2127 0x34a00, 0x34a04,
2128 0x34a0c, 0x34a2c,
2129 0x34a44, 0x34a50,
2130 0x34a74, 0x34c24,
2131 0x34d08, 0x34d14,
2132 0x34d1c, 0x34d20,
2133 0x34d3c, 0x34d50,
2134 0x35200, 0x3520c,
2135 0x35220, 0x35220,
2136 0x35240, 0x35240,
2137 0x35600, 0x35600,
2138 0x35608, 0x3560c,
2139 0x35a00, 0x35a1c,
2140 0x35e04, 0x35e20,
2141 0x35e38, 0x35e3c,
2142 0x35e80, 0x35e80,
2143 0x35e88, 0x35ea8,
2144 0x35eb0, 0x35eb4,
2145 0x35ec8, 0x35ed4,
2146 0x35fb8, 0x36004,
2147 0x36208, 0x3623c,
2148 0x36600, 0x36630,
2149 0x36a00, 0x36abc,
2150 0x36b00, 0x36b70,
2151 0x37000, 0x37048,
2152 0x37060, 0x3709c,
2153 0x370f0, 0x37148,
2154 0x37160, 0x3719c,
2155 0x371f0, 0x372e4,
2156 0x372f8, 0x373e4,
2157 0x373f8, 0x37448,
2158 0x37460, 0x3749c,
2159 0x374f0, 0x37548,
2160 0x37560, 0x3759c,
2161 0x375f0, 0x376e4,
2162 0x376f8, 0x377e4,
2163 0x377f8, 0x377fc,
2164 0x37814, 0x37814,
2165 0x3782c, 0x3782c,
2166 0x37880, 0x3788c,
2167 0x378e8, 0x378ec,
2168 0x37900, 0x37948,
2169 0x37960, 0x3799c,
2170 0x379f0, 0x37ae4,
2171 0x37af8, 0x37b10,
2172 0x37b28, 0x37b28,
2173 0x37b3c, 0x37b50,
2174 0x37bf0, 0x37c10,
2175 0x37c28, 0x37c28,
2176 0x37c3c, 0x37c50,
2177 0x37cf0, 0x37cfc,
2178 0x38000, 0x38030,
2179 0x38100, 0x38144,
2180 0x38190, 0x381d0,
2181 0x38200, 0x38318,
2182 0x38400, 0x3852c,
2183 0x38540, 0x3861c,
2184 0x38800, 0x38834,
2185 0x388c0, 0x38908,
2186 0x38910, 0x389ac,
2187 0x38a00, 0x38a04,
2188 0x38a0c, 0x38a2c,
2189 0x38a44, 0x38a50,
2190 0x38a74, 0x38c24,
2191 0x38d08, 0x38d14,
2192 0x38d1c, 0x38d20,
2193 0x38d3c, 0x38d50,
2194 0x39200, 0x3920c,
2195 0x39220, 0x39220,
2196 0x39240, 0x39240,
2197 0x39600, 0x39600,
2198 0x39608, 0x3960c,
2199 0x39a00, 0x39a1c,
2200 0x39e04, 0x39e20,
2201 0x39e38, 0x39e3c,
2202 0x39e80, 0x39e80,
2203 0x39e88, 0x39ea8,
2204 0x39eb0, 0x39eb4,
2205 0x39ec8, 0x39ed4,
2206 0x39fb8, 0x3a004,
2207 0x3a208, 0x3a23c,
2208 0x3a600, 0x3a630,
2209 0x3aa00, 0x3aabc,
2210 0x3ab00, 0x3ab70,
2211 0x3b000, 0x3b048,
2212 0x3b060, 0x3b09c,
2213 0x3b0f0, 0x3b148,
2214 0x3b160, 0x3b19c,
2215 0x3b1f0, 0x3b2e4,
2216 0x3b2f8, 0x3b3e4,
2217 0x3b3f8, 0x3b448,
2218 0x3b460, 0x3b49c,
2219 0x3b4f0, 0x3b548,
2220 0x3b560, 0x3b59c,
2221 0x3b5f0, 0x3b6e4,
2222 0x3b6f8, 0x3b7e4,
2223 0x3b7f8, 0x3b7fc,
2224 0x3b814, 0x3b814,
2225 0x3b82c, 0x3b82c,
2226 0x3b880, 0x3b88c,
2227 0x3b8e8, 0x3b8ec,
2228 0x3b900, 0x3b948,
2229 0x3b960, 0x3b99c,
2230 0x3b9f0, 0x3bae4,
2231 0x3baf8, 0x3bb10,
2232 0x3bb28, 0x3bb28,
2233 0x3bb3c, 0x3bb50,
2234 0x3bbf0, 0x3bc10,
2235 0x3bc28, 0x3bc28,
2236 0x3bc3c, 0x3bc50,
2237 0x3bcf0, 0x3bcfc,
2238 0x3c000, 0x3c030,
2239 0x3c100, 0x3c144,
2240 0x3c190, 0x3c1d0,
2241 0x3c200, 0x3c318,
2242 0x3c400, 0x3c52c,
2243 0x3c540, 0x3c61c,
2244 0x3c800, 0x3c834,
2245 0x3c8c0, 0x3c908,
2246 0x3c910, 0x3c9ac,
2247 0x3ca00, 0x3ca04,
2248 0x3ca0c, 0x3ca2c,
2249 0x3ca44, 0x3ca50,
2250 0x3ca74, 0x3cc24,
2251 0x3cd08, 0x3cd14,
2252 0x3cd1c, 0x3cd20,
2253 0x3cd3c, 0x3cd50,
2254 0x3d200, 0x3d20c,
2255 0x3d220, 0x3d220,
2256 0x3d240, 0x3d240,
2257 0x3d600, 0x3d600,
2258 0x3d608, 0x3d60c,
2259 0x3da00, 0x3da1c,
2260 0x3de04, 0x3de20,
2261 0x3de38, 0x3de3c,
2262 0x3de80, 0x3de80,
2263 0x3de88, 0x3dea8,
2264 0x3deb0, 0x3deb4,
2265 0x3dec8, 0x3ded4,
2266 0x3dfb8, 0x3e004,
2267 0x3e208, 0x3e23c,
2268 0x3e600, 0x3e630,
2269 0x3ea00, 0x3eabc,
2270 0x3eb00, 0x3eb70,
2271 0x3f000, 0x3f048,
2272 0x3f060, 0x3f09c,
2273 0x3f0f0, 0x3f148,
2274 0x3f160, 0x3f19c,
2275 0x3f1f0, 0x3f2e4,
2276 0x3f2f8, 0x3f3e4,
2277 0x3f3f8, 0x3f448,
2278 0x3f460, 0x3f49c,
2279 0x3f4f0, 0x3f548,
2280 0x3f560, 0x3f59c,
2281 0x3f5f0, 0x3f6e4,
2282 0x3f6f8, 0x3f7e4,
2283 0x3f7f8, 0x3f7fc,
2284 0x3f814, 0x3f814,
2285 0x3f82c, 0x3f82c,
2286 0x3f880, 0x3f88c,
2287 0x3f8e8, 0x3f8ec,
2288 0x3f900, 0x3f948,
2289 0x3f960, 0x3f99c,
2290 0x3f9f0, 0x3fae4,
2291 0x3faf8, 0x3fb10,
2292 0x3fb28, 0x3fb28,
2293 0x3fb3c, 0x3fb50,
2294 0x3fbf0, 0x3fc10,
2295 0x3fc28, 0x3fc28,
2296 0x3fc3c, 0x3fc50,
2297 0x3fcf0, 0x3fcfc,
2298 0x40000, 0x4000c,
2299 0x40040, 0x40068,
2300 0x40080, 0x40144,
2301 0x40180, 0x4018c,
2302 0x40200, 0x40298,
2303 0x402ac, 0x4033c,
2304 0x403f8, 0x403fc,
2305 0x41304, 0x413c4,
2306 0x41400, 0x4141c,
2307 0x41480, 0x414d0,
2308 0x44000, 0x44078,
2309 0x440c0, 0x44278,
2310 0x442c0, 0x44478,
2311 0x444c0, 0x44678,
2312 0x446c0, 0x44878,
2313 0x448c0, 0x449fc,
2314 0x45000, 0x45068,
2315 0x45080, 0x45084,
2316 0x450a0, 0x450b0,
2317 0x45200, 0x45268,
2318 0x45280, 0x45284,
2319 0x452a0, 0x452b0,
2320 0x460c0, 0x460e4,
2321 0x47000, 0x4708c,
2322 0x47200, 0x47250,
2323 0x47400, 0x47420,
2324 0x47600, 0x47618,
2325 0x47800, 0x47814,
2326 0x48000, 0x4800c,
2327 0x48040, 0x48068,
2328 0x48080, 0x48144,
2329 0x48180, 0x4818c,
2330 0x48200, 0x48298,
2331 0x482ac, 0x4833c,
2332 0x483f8, 0x483fc,
2333 0x49304, 0x493c4,
2334 0x49400, 0x4941c,
2335 0x49480, 0x494d0,
2336 0x4c000, 0x4c078,
2337 0x4c0c0, 0x4c278,
2338 0x4c2c0, 0x4c478,
2339 0x4c4c0, 0x4c678,
2340 0x4c6c0, 0x4c878,
2341 0x4c8c0, 0x4c9fc,
2342 0x4d000, 0x4d068,
2343 0x4d080, 0x4d084,
2344 0x4d0a0, 0x4d0b0,
2345 0x4d200, 0x4d268,
2346 0x4d280, 0x4d284,
2347 0x4d2a0, 0x4d2b0,
2348 0x4e0c0, 0x4e0e4,
2349 0x4f000, 0x4f08c,
2350 0x4f200, 0x4f250,
2351 0x4f400, 0x4f420,
2352 0x4f600, 0x4f618,
2353 0x4f800, 0x4f814,
2354 0x50000, 0x500cc,
2355 0x50400, 0x50400,
2356 0x50800, 0x508cc,
2357 0x50c00, 0x50c00,
2358 0x51000, 0x5101c,
2359 0x51300, 0x51308,
2360 };
2361
2362 int i;
2363 struct adapter *ap = netdev2adap(dev);
2364 static const unsigned int *reg_ranges;
2365 int arr_size = 0, buf_size = 0;
2366
2367 if (is_t4(ap->params.chip)) {
2368 reg_ranges = &t4_reg_ranges[0];
2369 arr_size = ARRAY_SIZE(t4_reg_ranges);
2370 buf_size = T4_REGMAP_SIZE;
2371 } else {
2372 reg_ranges = &t5_reg_ranges[0];
2373 arr_size = ARRAY_SIZE(t5_reg_ranges);
2374 buf_size = T5_REGMAP_SIZE;
2375 }
2376
2377 regs->version = mk_adap_vers(ap);
2378
2379 memset(buf, 0, buf_size);
2380 for (i = 0; i < arr_size; i += 2)
2381 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2382 }
2383
2384 static int restart_autoneg(struct net_device *dev)
2385 {
2386 struct port_info *p = netdev_priv(dev);
2387
2388 if (!netif_running(dev))
2389 return -EAGAIN;
2390 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2391 return -EINVAL;
2392 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2393 return 0;
2394 }
2395
2396 static int identify_port(struct net_device *dev,
2397 enum ethtool_phys_id_state state)
2398 {
2399 unsigned int val;
2400 struct adapter *adap = netdev2adap(dev);
2401
2402 if (state == ETHTOOL_ID_ACTIVE)
2403 val = 0xffff;
2404 else if (state == ETHTOOL_ID_INACTIVE)
2405 val = 0;
2406 else
2407 return -EINVAL;
2408
2409 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2410 }
2411
2412 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2413 {
2414 unsigned int v = 0;
2415
2416 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2417 type == FW_PORT_TYPE_BT_XAUI) {
2418 v |= SUPPORTED_TP;
2419 if (caps & FW_PORT_CAP_SPEED_100M)
2420 v |= SUPPORTED_100baseT_Full;
2421 if (caps & FW_PORT_CAP_SPEED_1G)
2422 v |= SUPPORTED_1000baseT_Full;
2423 if (caps & FW_PORT_CAP_SPEED_10G)
2424 v |= SUPPORTED_10000baseT_Full;
2425 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2426 v |= SUPPORTED_Backplane;
2427 if (caps & FW_PORT_CAP_SPEED_1G)
2428 v |= SUPPORTED_1000baseKX_Full;
2429 if (caps & FW_PORT_CAP_SPEED_10G)
2430 v |= SUPPORTED_10000baseKX4_Full;
2431 } else if (type == FW_PORT_TYPE_KR)
2432 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2433 else if (type == FW_PORT_TYPE_BP_AP)
2434 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2435 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2436 else if (type == FW_PORT_TYPE_BP4_AP)
2437 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2438 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2439 SUPPORTED_10000baseKX4_Full;
2440 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2441 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2442 v |= SUPPORTED_FIBRE;
2443 else if (type == FW_PORT_TYPE_BP40_BA)
2444 v |= SUPPORTED_40000baseSR4_Full;
2445
2446 if (caps & FW_PORT_CAP_ANEG)
2447 v |= SUPPORTED_Autoneg;
2448 return v;
2449 }
2450
2451 static unsigned int to_fw_linkcaps(unsigned int caps)
2452 {
2453 unsigned int v = 0;
2454
2455 if (caps & ADVERTISED_100baseT_Full)
2456 v |= FW_PORT_CAP_SPEED_100M;
2457 if (caps & ADVERTISED_1000baseT_Full)
2458 v |= FW_PORT_CAP_SPEED_1G;
2459 if (caps & ADVERTISED_10000baseT_Full)
2460 v |= FW_PORT_CAP_SPEED_10G;
2461 if (caps & ADVERTISED_40000baseSR4_Full)
2462 v |= FW_PORT_CAP_SPEED_40G;
2463 return v;
2464 }
2465
2466 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2467 {
2468 const struct port_info *p = netdev_priv(dev);
2469
2470 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2471 p->port_type == FW_PORT_TYPE_BT_XFI ||
2472 p->port_type == FW_PORT_TYPE_BT_XAUI)
2473 cmd->port = PORT_TP;
2474 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2475 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2476 cmd->port = PORT_FIBRE;
2477 else if (p->port_type == FW_PORT_TYPE_SFP ||
2478 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2479 p->port_type == FW_PORT_TYPE_QSFP) {
2480 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2481 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2482 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2483 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2484 cmd->port = PORT_FIBRE;
2485 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2486 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2487 cmd->port = PORT_DA;
2488 else
2489 cmd->port = PORT_OTHER;
2490 } else
2491 cmd->port = PORT_OTHER;
2492
2493 if (p->mdio_addr >= 0) {
2494 cmd->phy_address = p->mdio_addr;
2495 cmd->transceiver = XCVR_EXTERNAL;
2496 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2497 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2498 } else {
2499 cmd->phy_address = 0; /* not really, but no better option */
2500 cmd->transceiver = XCVR_INTERNAL;
2501 cmd->mdio_support = 0;
2502 }
2503
2504 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2505 cmd->advertising = from_fw_linkcaps(p->port_type,
2506 p->link_cfg.advertising);
2507 ethtool_cmd_speed_set(cmd,
2508 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2509 cmd->duplex = DUPLEX_FULL;
2510 cmd->autoneg = p->link_cfg.autoneg;
2511 cmd->maxtxpkt = 0;
2512 cmd->maxrxpkt = 0;
2513 return 0;
2514 }
2515
2516 static unsigned int speed_to_caps(int speed)
2517 {
2518 if (speed == 100)
2519 return FW_PORT_CAP_SPEED_100M;
2520 if (speed == 1000)
2521 return FW_PORT_CAP_SPEED_1G;
2522 if (speed == 10000)
2523 return FW_PORT_CAP_SPEED_10G;
2524 if (speed == 40000)
2525 return FW_PORT_CAP_SPEED_40G;
2526 return 0;
2527 }
2528
2529 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2530 {
2531 unsigned int cap;
2532 struct port_info *p = netdev_priv(dev);
2533 struct link_config *lc = &p->link_cfg;
2534 u32 speed = ethtool_cmd_speed(cmd);
2535
2536 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2537 return -EINVAL;
2538
2539 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2540 /*
2541 * PHY offers a single speed. See if that's what's
2542 * being requested.
2543 */
2544 if (cmd->autoneg == AUTONEG_DISABLE &&
2545 (lc->supported & speed_to_caps(speed)))
2546 return 0;
2547 return -EINVAL;
2548 }
2549
2550 if (cmd->autoneg == AUTONEG_DISABLE) {
2551 cap = speed_to_caps(speed);
2552
/* forcing a fixed speed is only honoured for 100M here; 1G and
 * faster links must use autonegotiation
 */
2553 if (!(lc->supported & cap) ||
2554 (speed == 1000) ||
2555 (speed == 10000) ||
2556 (speed == 40000))
2557 return -EINVAL;
2558 lc->requested_speed = cap;
2559 lc->advertising = 0;
2560 } else {
2561 cap = to_fw_linkcaps(cmd->advertising);
2562 if (!(lc->supported & cap))
2563 return -EINVAL;
2564 lc->requested_speed = 0;
2565 lc->advertising = cap | FW_PORT_CAP_ANEG;
2566 }
2567 lc->autoneg = cmd->autoneg;
2568
2569 if (netif_running(dev))
2570 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2571 lc);
2572 return 0;
2573 }
2574
2575 static void get_pauseparam(struct net_device *dev,
2576 struct ethtool_pauseparam *epause)
2577 {
2578 struct port_info *p = netdev_priv(dev);
2579
2580 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2581 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2582 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2583 }
2584
2585 static int set_pauseparam(struct net_device *dev,
2586 struct ethtool_pauseparam *epause)
2587 {
2588 struct port_info *p = netdev_priv(dev);
2589 struct link_config *lc = &p->link_cfg;
2590
2591 if (epause->autoneg == AUTONEG_DISABLE)
2592 lc->requested_fc = 0;
2593 else if (lc->supported & FW_PORT_CAP_ANEG)
2594 lc->requested_fc = PAUSE_AUTONEG;
2595 else
2596 return -EINVAL;
2597
2598 if (epause->rx_pause)
2599 lc->requested_fc |= PAUSE_RX;
2600 if (epause->tx_pause)
2601 lc->requested_fc |= PAUSE_TX;
2602 if (netif_running(dev))
2603 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2604 lc);
2605 return 0;
2606 }
2607
2608 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2609 {
2610 const struct port_info *pi = netdev_priv(dev);
2611 const struct sge *s = &pi->adapter->sge;
2612
2613 e->rx_max_pending = MAX_RX_BUFFERS;
2614 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2615 e->rx_jumbo_max_pending = 0;
2616 e->tx_max_pending = MAX_TXQ_ENTRIES;
2617
/* the free list is sized 8 entries larger than the user-visible value
 * (see the matching "+ 8" in set_sge_param() below), so hide them here
 */
2618 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2619 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2620 e->rx_jumbo_pending = 0;
2621 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2622 }
2623
2624 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2625 {
2626 int i;
2627 const struct port_info *pi = netdev_priv(dev);
2628 struct adapter *adapter = pi->adapter;
2629 struct sge *s = &adapter->sge;
2630
2631 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2632 e->tx_pending > MAX_TXQ_ENTRIES ||
2633 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2634 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2635 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2636 return -EINVAL;
2637
2638 if (adapter->flags & FULL_INIT_DONE)
2639 return -EBUSY;
2640
2641 for (i = 0; i < pi->nqsets; ++i) {
2642 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2643 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2644 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2645 }
2646 return 0;
2647 }
2648
2649 static int closest_timer(const struct sge *s, int time)
2650 {
2651 int i, delta, match = 0, min_delta = INT_MAX;
2652
2653 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2654 delta = time - s->timer_val[i];
2655 if (delta < 0)
2656 delta = -delta;
2657 if (delta < min_delta) {
2658 min_delta = delta;
2659 match = i;
2660 }
2661 }
2662 return match;
2663 }
2664
2665 static int closest_thres(const struct sge *s, int thres)
2666 {
2667 int i, delta, match = 0, min_delta = INT_MAX;
2668
2669 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2670 delta = thres - s->counter_val[i];
2671 if (delta < 0)
2672 delta = -delta;
2673 if (delta < min_delta) {
2674 min_delta = delta;
2675 match = i;
2676 }
2677 }
2678 return match;
2679 }
2680
2681 /*
2682 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2683 */
2684 static unsigned int qtimer_val(const struct adapter *adap,
2685 const struct sge_rspq *q)
2686 {
2687 unsigned int idx = q->intr_params >> 1;
2688
2689 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2690 }
2691
2692 /**
2693 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2694 * @q: the Rx queue
2695 * @us: the hold-off time in us, or 0 to disable timer
2696 * @cnt: the hold-off packet count, or 0 to disable counter
2697 *
2698 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2699 * one of the two needs to be enabled for the queue to generate interrupts.
2700 */
2701 static int set_rspq_intr_params(struct sge_rspq *q,
2702 unsigned int us, unsigned int cnt)
2703 {
2704 struct adapter *adap = q->adap;
2705
2706 if ((us | cnt) == 0)
2707 cnt = 1;
2708
2709 if (cnt) {
2710 int err;
2711 u32 v, new_idx;
2712
2713 new_idx = closest_thres(&adap->sge, cnt);
2714 if (q->desc && q->pktcnt_idx != new_idx) {
2715 /* the queue has already been created, update it */
2716 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2717 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2718 FW_PARAMS_PARAM_YZ(q->cntxt_id);
2719 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2720 &new_idx);
2721 if (err)
2722 return err;
2723 }
2724 q->pktcnt_idx = new_idx;
2725 }
2726
2727 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2728 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2729 return 0;
2730 }
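/*
 * Note on the intr_params encoding above (inferred from this file, not from
 * hardware documentation): bit 0 carries QINTR_CNT_EN and the hold-off timer
 * index sits in the bits above it, which is why qtimer_val() recovers the
 * index with a right shift by one.  As a sketch, set_rspq_intr_params(q, 0,
 * 32) picks the counter threshold closest to 32 packets and maps the zero
 * hold-off time to timer index 6, apparently a value outside the programmed
 * timers, which qtimer_val() then reports as 0 ("no timer").
 */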
2731
2732 /**
2733 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2734 * @dev: the network device
2735 * @us: the hold-off time in us, or 0 to disable timer
2736 * @cnt: the hold-off packet count, or 0 to disable counter
2737 *
2738 * Set the RX interrupt hold-off parameters for a network device.
2739 */
2740 static int set_rx_intr_params(struct net_device *dev,
2741 unsigned int us, unsigned int cnt)
2742 {
2743 int i, err;
2744 struct port_info *pi = netdev_priv(dev);
2745 struct adapter *adap = pi->adapter;
2746 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2747
2748 for (i = 0; i < pi->nqsets; i++, q++) {
2749 err = set_rspq_intr_params(&q->rspq, us, cnt);
2750 if (err)
2751 return err;
2752 }
2753 return 0;
2754 }
2755
2756 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2757 {
2758 int i;
2759 struct port_info *pi = netdev_priv(dev);
2760 struct adapter *adap = pi->adapter;
2761 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2762
2763 for (i = 0; i < pi->nqsets; i++, q++)
2764 q->rspq.adaptive_rx = adaptive_rx;
2765
2766 return 0;
2767 }
2768
2769 static int get_adaptive_rx_setting(struct net_device *dev)
2770 {
2771 struct port_info *pi = netdev_priv(dev);
2772 struct adapter *adap = pi->adapter;
2773 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2774
2775 return q->rspq.adaptive_rx;
2776 }
2777
2778 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2779 {
2780 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
2781 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2782 c->rx_max_coalesced_frames);
2783 }
2784
2785 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2786 {
2787 const struct port_info *pi = netdev_priv(dev);
2788 const struct adapter *adap = pi->adapter;
2789 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2790
2791 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2792 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2793 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2794 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2795 return 0;
2796 }
2797
2798 /**
2799 * eeprom_ptov - translate a physical EEPROM address to virtual
2800 * @phys_addr: the physical EEPROM address
2801 * @fn: the PCI function number
2802 * @sz: size of function-specific area
2803 *
2804 * Translate a physical EEPROM address to virtual. The first 1K is
2805 * accessed through virtual addresses starting at 31K, the rest is
2806 * accessed through virtual addresses starting at 0.
2807 *
2808 * The mapping is as follows:
2809 * [0..1K) -> [31K..32K)
2810 * [1K..1K+A) -> [31K-A..31K)
2811 * [1K+A..ES) -> [0..ES-A-1K)
2812 *
2813 * where A = @fn * @sz, and ES = EEPROM size.
2814 */
2815 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2816 {
2817 fn *= sz;
2818 if (phys_addr < 1024)
2819 return phys_addr + (31 << 10);
2820 if (phys_addr < 1024 + fn)
2821 return 31744 - fn + phys_addr - 1024;
2822 if (phys_addr < EEPROMSIZE)
2823 return phys_addr - 1024 - fn;
2824 return -EINVAL;
2825 }
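/*
 * Worked example of the mapping above (illustrative only; assumes @fn = 1
 * and @sz = 1K, so A = 1K):
 *
 *	phys 0x000 -> virt 0x7c00	(start of the [31K..32K) window)
 *	phys 0x3ff -> virt 0x7fff	(end of that window)
 *	phys 0x400 -> virt 0x7800	(this function's private area)
 *	phys 0x800 -> virt 0x0000	(rest of the EEPROM)
 */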
2826
2827 /*
2828 * The next two routines implement eeprom read/write from physical addresses.
2829 */
2830 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2831 {
2832 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2833
2834 if (vaddr >= 0)
2835 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2836 return vaddr < 0 ? vaddr : 0;
2837 }
2838
2839 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2840 {
2841 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2842
2843 if (vaddr >= 0)
2844 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2845 return vaddr < 0 ? vaddr : 0;
2846 }
2847
2848 #define EEPROM_MAGIC 0x38E2F10C
2849
2850 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2851 u8 *data)
2852 {
2853 int i, err = 0;
2854 struct adapter *adapter = netdev2adap(dev);
2855
2856 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2857 if (!buf)
2858 return -ENOMEM;
2859
2860 e->magic = EEPROM_MAGIC;
2861 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2862 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2863
2864 if (!err)
2865 memcpy(data, buf + e->offset, e->len);
2866 kfree(buf);
2867 return err;
2868 }
2869
2870 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2871 u8 *data)
2872 {
2873 u8 *buf;
2874 int err = 0;
2875 u32 aligned_offset, aligned_len, *p;
2876 struct adapter *adapter = netdev2adap(dev);
2877
2878 if (eeprom->magic != EEPROM_MAGIC)
2879 return -EINVAL;
2880
2881 aligned_offset = eeprom->offset & ~3;
2882 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2883
2884 if (adapter->fn > 0) {
2885 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2886
2887 if (aligned_offset < start ||
2888 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2889 return -EPERM;
2890 }
2891
2892 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2893 /*
2894 * RMW possibly needed for first or last words.
2895 */
2896 buf = kmalloc(aligned_len, GFP_KERNEL);
2897 if (!buf)
2898 return -ENOMEM;
2899 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2900 if (!err && aligned_len > 4)
2901 err = eeprom_rd_phys(adapter,
2902 aligned_offset + aligned_len - 4,
2903 (u32 *)&buf[aligned_len - 4]);
2904 if (err)
2905 goto out;
2906 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2907 } else
2908 buf = data;
2909
2910 err = t4_seeprom_wp(adapter, false);
2911 if (err)
2912 goto out;
2913
2914 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2915 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2916 aligned_offset += 4;
2917 }
2918
2919 if (!err)
2920 err = t4_seeprom_wp(adapter, true);
2921 out:
2922 if (buf != data)
2923 kfree(buf);
2924 return err;
2925 }
2926
2927 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2928 {
2929 int ret;
2930 const struct firmware *fw;
2931 struct adapter *adap = netdev2adap(netdev);
2932
2933 ef->data[sizeof(ef->data) - 1] = '\0';
2934 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2935 if (ret < 0)
2936 return ret;
2937
2938 ret = t4_load_fw(adap, fw->data, fw->size);
2939 release_firmware(fw);
2940 if (!ret)
2941 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2942 return ret;
2943 }
2944
2945 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2946 #define BCAST_CRC 0xa0ccc1a6
2947
2948 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2949 {
2950 wol->supported = WOL_SUPPORTED;
2951 wol->wolopts = netdev2adap(dev)->wol;
2952 memset(&wol->sopass, 0, sizeof(wol->sopass));
2953 }
2954
2955 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2956 {
2957 int err = 0;
2958 struct port_info *pi = netdev_priv(dev);
2959
2960 if (wol->wolopts & ~WOL_SUPPORTED)
2961 return -EINVAL;
2962 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2963 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2964 if (wol->wolopts & WAKE_BCAST) {
2965 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2966 ~0ULL, 0, false);
2967 if (!err)
2968 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2969 ~6ULL, ~0ULL, BCAST_CRC, true);
2970 } else
2971 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2972 return err;
2973 }
2974
2975 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2976 {
2977 const struct port_info *pi = netdev_priv(dev);
2978 netdev_features_t changed = dev->features ^ features;
2979 int err;
2980
2981 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2982 return 0;
2983
2984 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2985 -1, -1, -1,
2986 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2987 if (unlikely(err))
2988 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2989 return err;
2990 }
2991
2992 static u32 get_rss_table_size(struct net_device *dev)
2993 {
2994 const struct port_info *pi = netdev_priv(dev);
2995
2996 return pi->rss_size;
2997 }
2998
2999 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
3000 {
3001 const struct port_info *pi = netdev_priv(dev);
3002 unsigned int n = pi->rss_size;
3003
3004 while (n--)
3005 p[n] = pi->rss[n];
3006 return 0;
3007 }
3008
3009 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
3010 {
3011 unsigned int i;
3012 struct port_info *pi = netdev_priv(dev);
3013
3014 for (i = 0; i < pi->rss_size; i++)
3015 pi->rss[i] = p[i];
3016 if (pi->adapter->flags & FULL_INIT_DONE)
3017 return write_rss(pi, pi->rss);
3018 return 0;
3019 }
3020
3021 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
3022 u32 *rules)
3023 {
3024 const struct port_info *pi = netdev_priv(dev);
3025
3026 switch (info->cmd) {
3027 case ETHTOOL_GRXFH: {
3028 unsigned int v = pi->rss_mode;
3029
3030 info->data = 0;
3031 switch (info->flow_type) {
3032 case TCP_V4_FLOW:
3033 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3034 info->data = RXH_IP_SRC | RXH_IP_DST |
3035 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3036 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3037 info->data = RXH_IP_SRC | RXH_IP_DST;
3038 break;
3039 case UDP_V4_FLOW:
3040 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3041 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3042 info->data = RXH_IP_SRC | RXH_IP_DST |
3043 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3044 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3045 info->data = RXH_IP_SRC | RXH_IP_DST;
3046 break;
3047 case SCTP_V4_FLOW:
3048 case AH_ESP_V4_FLOW:
3049 case IPV4_FLOW:
3050 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3051 info->data = RXH_IP_SRC | RXH_IP_DST;
3052 break;
3053 case TCP_V6_FLOW:
3054 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3055 info->data = RXH_IP_SRC | RXH_IP_DST |
3056 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3057 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3058 info->data = RXH_IP_SRC | RXH_IP_DST;
3059 break;
3060 case UDP_V6_FLOW:
3061 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3062 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3063 info->data = RXH_IP_SRC | RXH_IP_DST |
3064 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3065 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3066 info->data = RXH_IP_SRC | RXH_IP_DST;
3067 break;
3068 case SCTP_V6_FLOW:
3069 case AH_ESP_V6_FLOW:
3070 case IPV6_FLOW:
3071 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3072 info->data = RXH_IP_SRC | RXH_IP_DST;
3073 break;
3074 }
3075 return 0;
3076 }
3077 case ETHTOOL_GRXRINGS:
3078 info->data = pi->nqsets;
3079 return 0;
3080 }
3081 return -EOPNOTSUPP;
3082 }
3083
3084 static const struct ethtool_ops cxgb_ethtool_ops = {
3085 .get_settings = get_settings,
3086 .set_settings = set_settings,
3087 .get_drvinfo = get_drvinfo,
3088 .get_msglevel = get_msglevel,
3089 .set_msglevel = set_msglevel,
3090 .get_ringparam = get_sge_param,
3091 .set_ringparam = set_sge_param,
3092 .get_coalesce = get_coalesce,
3093 .set_coalesce = set_coalesce,
3094 .get_eeprom_len = get_eeprom_len,
3095 .get_eeprom = get_eeprom,
3096 .set_eeprom = set_eeprom,
3097 .get_pauseparam = get_pauseparam,
3098 .set_pauseparam = set_pauseparam,
3099 .get_link = ethtool_op_get_link,
3100 .get_strings = get_strings,
3101 .set_phys_id = identify_port,
3102 .nway_reset = restart_autoneg,
3103 .get_sset_count = get_sset_count,
3104 .get_ethtool_stats = get_stats,
3105 .get_regs_len = get_regs_len,
3106 .get_regs = get_regs,
3107 .get_wol = get_wol,
3108 .set_wol = set_wol,
3109 .get_rxnfc = get_rxnfc,
3110 .get_rxfh_indir_size = get_rss_table_size,
3111 .get_rxfh = get_rss_table,
3112 .set_rxfh = set_rss_table,
3113 .flash_device = set_flash,
3114 };
3115
3116 /*
3117 * debugfs support
3118 */
3119 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3120 loff_t *ppos)
3121 {
3122 loff_t pos = *ppos;
3123 loff_t avail = file_inode(file)->i_size;
/* add_debugfs_mem() encodes the memory type in the low two bits of the
 * (at least 4-byte aligned) adapter pointer; peel the two apart here
 */
3124 unsigned int mem = (uintptr_t)file->private_data & 3;
3125 struct adapter *adap = file->private_data - mem;
3126 __be32 *data;
3127 int ret;
3128
3129 if (pos < 0)
3130 return -EINVAL;
3131 if (pos >= avail)
3132 return 0;
3133 if (count > avail - pos)
3134 count = avail - pos;
3135
3136 data = t4_alloc_mem(count);
3137 if (!data)
3138 return -ENOMEM;
3139
3140 spin_lock(&adap->win0_lock);
3141 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3142 spin_unlock(&adap->win0_lock);
3143 if (ret) {
3144 t4_free_mem(data);
3145 return ret;
3146 }
3147 ret = copy_to_user(buf, data, count);
3148
3149 t4_free_mem(data);
3150 if (ret)
3151 return -EFAULT;
3152
3153 *ppos = pos + count;
3154 return count;
3155 }
3156
3157 static const struct file_operations mem_debugfs_fops = {
3158 .owner = THIS_MODULE,
3159 .open = simple_open,
3160 .read = mem_read,
3161 .llseek = default_llseek,
3162 };
3163
3164 static void add_debugfs_mem(struct adapter *adap, const char *name,
3165 unsigned int idx, unsigned int size_mb)
3166 {
3167 struct dentry *de;
3168
3169 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3170 (void *)adap + idx, &mem_debugfs_fops);
3171 if (de && de->d_inode)
3172 de->d_inode->i_size = size_mb << 20;
3173 }
3174
3175 static int setup_debugfs(struct adapter *adap)
3176 {
3177 int i;
3178 u32 size;
3179
3180 if (IS_ERR_OR_NULL(adap->debugfs_root))
3181 return -1;
3182
3183 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
3184 if (i & EDRAM0_ENABLE) {
3185 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3186 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3187 }
3188 if (i & EDRAM1_ENABLE) {
3189 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3190 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3191 }
3192 if (is_t4(adap->params.chip)) {
3193 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3194 if (i & EXT_MEM_ENABLE)
3195 add_debugfs_mem(adap, "mc", MEM_MC,
3196 EXT_MEM_SIZE_GET(size));
3197 } else {
3198 if (i & EXT_MEM_ENABLE) {
3199 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3200 add_debugfs_mem(adap, "mc0", MEM_MC0,
3201 EXT_MEM_SIZE_GET(size));
3202 }
3203 if (i & EXT_MEM1_ENABLE) {
3204 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3205 add_debugfs_mem(adap, "mc1", MEM_MC1,
3206 EXT_MEM_SIZE_GET(size));
3207 }
3208 }
3209 if (adap->l2t)
3210 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3211 &t4_l2t_fops);
3212 return 0;
3213 }
3214
3215 /*
3216 * upper-layer driver support
3217 */
3218
3219 /*
3220 * Allocate an active-open TID and set it to the supplied value.
3221 */
3222 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3223 {
3224 int atid = -1;
3225
3226 spin_lock_bh(&t->atid_lock);
3227 if (t->afree) {
3228 union aopen_entry *p = t->afree;
3229
3230 atid = (p - t->atid_tab) + t->atid_base;
3231 t->afree = p->next;
3232 p->data = data;
3233 t->atids_in_use++;
3234 }
3235 spin_unlock_bh(&t->atid_lock);
3236 return atid;
3237 }
3238 EXPORT_SYMBOL(cxgb4_alloc_atid);
3239
3240 /*
3241 * Release an active-open TID.
3242 */
3243 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3244 {
3245 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3246
3247 spin_lock_bh(&t->atid_lock);
3248 p->next = t->afree;
3249 t->afree = p;
3250 t->atids_in_use--;
3251 spin_unlock_bh(&t->atid_lock);
3252 }
3253 EXPORT_SYMBOL(cxgb4_free_atid);
3254
3255 /*
3256 * Allocate a server TID and set it to the supplied value.
3257 */
3258 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3259 {
3260 int stid;
3261
3262 spin_lock_bh(&t->stid_lock);
3263 if (family == PF_INET) {
3264 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3265 if (stid < t->nstids)
3266 __set_bit(stid, t->stid_bmap);
3267 else
3268 stid = -1;
3269 } else {
/* IPv6 servers take an order-2 region, i.e. 4 consecutive stids */
3270 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3271 if (stid < 0)
3272 stid = -1;
3273 }
3274 if (stid >= 0) {
3275 t->stid_tab[stid].data = data;
3276 stid += t->stid_base;
3277 /* An IPv6 server entry requires up to 520 bits, i.e. 16 TCAM cells,
3278 * which is equivalent to 4 TIDs.  With CLIP enabled it needs only
3279 * 2 TIDs.
3280 */
3281 if (family == PF_INET)
3282 t->stids_in_use++;
3283 else
3284 t->stids_in_use += 4;
3285 }
3286 spin_unlock_bh(&t->stid_lock);
3287 return stid;
3288 }
3289 EXPORT_SYMBOL(cxgb4_alloc_stid);
3290
3291 /* Allocate a server filter TID and set it to the supplied value.
3292 */
3293 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3294 {
3295 int stid;
3296
3297 spin_lock_bh(&t->stid_lock);
3298 if (family == PF_INET) {
3299 stid = find_next_zero_bit(t->stid_bmap,
3300 t->nstids + t->nsftids, t->nstids);
3301 if (stid < (t->nstids + t->nsftids))
3302 __set_bit(stid, t->stid_bmap);
3303 else
3304 stid = -1;
3305 } else {
3306 stid = -1;
3307 }
3308 if (stid >= 0) {
3309 t->stid_tab[stid].data = data;
3310 stid -= t->nstids;
3311 stid += t->sftid_base;
3312 t->stids_in_use++;
3313 }
3314 spin_unlock_bh(&t->stid_lock);
3315 return stid;
3316 }
3317 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3318
3319 /* Release a server TID.
3320 */
3321 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3322 {
3323 /* Is it a server filter TID? */
3324 if (t->nsftids && (stid >= t->sftid_base)) {
3325 stid -= t->sftid_base;
3326 stid += t->nstids;
3327 } else {
3328 stid -= t->stid_base;
3329 }
3330
3331 spin_lock_bh(&t->stid_lock);
3332 if (family == PF_INET)
3333 __clear_bit(stid, t->stid_bmap);
3334 else
3335 bitmap_release_region(t->stid_bmap, stid, 2);
3336 t->stid_tab[stid].data = NULL;
3337 if (family == PF_INET)
3338 t->stids_in_use--;
3339 else
3340 t->stids_in_use -= 4;
3341 spin_unlock_bh(&t->stid_lock);
3342 }
3343 EXPORT_SYMBOL(cxgb4_free_stid);
3344
3345 /*
3346 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3347 */
3348 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3349 unsigned int tid)
3350 {
3351 struct cpl_tid_release *req;
3352
3353 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3354 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3355 INIT_TP_WR(req, tid);
3356 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3357 }
3358
3359 /*
3360 * Queue a TID release request and if necessary schedule a work queue to
3361 * process it.
3362 */
3363 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3364 unsigned int tid)
3365 {
3366 void **p = &t->tid_tab[tid];
3367 struct adapter *adap = container_of(t, struct adapter, tids);
3368
3369 spin_lock_bh(&adap->tid_release_lock);
3370 *p = adap->tid_release_head;
3371 /* Low 2 bits encode the Tx channel number */
3372 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3373 if (!adap->tid_release_task_busy) {
3374 adap->tid_release_task_busy = true;
3375 queue_work(adap->workq, &adap->tid_release_task);
3376 }
3377 spin_unlock_bh(&adap->tid_release_lock);
3378 }
3379
3380 /*
3381 * Process the list of pending TID release requests.
3382 */
3383 static void process_tid_release_list(struct work_struct *work)
3384 {
3385 struct sk_buff *skb;
3386 struct adapter *adap;
3387
3388 adap = container_of(work, struct adapter, tid_release_task);
3389
3390 spin_lock_bh(&adap->tid_release_lock);
3391 while (adap->tid_release_head) {
3392 void **p = adap->tid_release_head;
3393 unsigned int chan = (uintptr_t)p & 3;
3394 p = (void *)p - chan;
3395
3396 adap->tid_release_head = *p;
3397 *p = NULL;
3398 spin_unlock_bh(&adap->tid_release_lock);
3399
3400 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3401 GFP_KERNEL)))
3402 schedule_timeout_uninterruptible(1);
3403
3404 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3405 t4_ofld_send(adap, skb);
3406 spin_lock_bh(&adap->tid_release_lock);
3407 }
3408 adap->tid_release_task_busy = false;
3409 spin_unlock_bh(&adap->tid_release_lock);
3410 }
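/*
 * Editorial note on the two routines above: the pending-release list is
 * threaded through the tid_tab[] slots themselves.  Each queued slot
 * temporarily stores the pointer to the previously queued slot, with the
 * Tx channel number stashed in the low two bits of that pointer, so no
 * extra memory is needed while the work item drains the list.
 */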
3411
3412 /*
3413 * Release a TID and inform HW. If we are unable to allocate the release
3414 * message we defer to a work queue.
3415 */
3416 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3417 {
3418 void *old;
3419 struct sk_buff *skb;
3420 struct adapter *adap = container_of(t, struct adapter, tids);
3421
3422 old = t->tid_tab[tid];
3423 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3424 if (likely(skb)) {
3425 t->tid_tab[tid] = NULL;
3426 mk_tid_release(skb, chan, tid);
3427 t4_ofld_send(adap, skb);
3428 } else
3429 cxgb4_queue_tid_release(t, chan, tid);
3430 if (old)
3431 atomic_dec(&t->tids_in_use);
3432 }
3433 EXPORT_SYMBOL(cxgb4_remove_tid);
3434
3435 /*
3436 * Allocate and initialize the TID tables. Returns 0 on success.
3437 */
3438 static int tid_init(struct tid_info *t)
3439 {
3440 size_t size;
3441 unsigned int stid_bmap_size;
3442 unsigned int natids = t->natids;
3443 struct adapter *adap = container_of(t, struct adapter, tids);
3444
3445 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3446 size = t->ntids * sizeof(*t->tid_tab) +
3447 natids * sizeof(*t->atid_tab) +
3448 t->nstids * sizeof(*t->stid_tab) +
3449 t->nsftids * sizeof(*t->stid_tab) +
3450 stid_bmap_size * sizeof(long) +
3451 t->nftids * sizeof(*t->ftid_tab) +
3452 t->nsftids * sizeof(*t->ftid_tab);
3453
3454 t->tid_tab = t4_alloc_mem(size);
3455 if (!t->tid_tab)
3456 return -ENOMEM;
3457
3458 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3459 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3460 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3461 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3462 spin_lock_init(&t->stid_lock);
3463 spin_lock_init(&t->atid_lock);
3464
3465 t->stids_in_use = 0;
3466 t->afree = NULL;
3467 t->atids_in_use = 0;
3468 atomic_set(&t->tids_in_use, 0);
3469
3470 /* Setup the free list for atid_tab and clear the stid bitmap. */
3471 if (natids) {
3472 while (--natids)
3473 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3474 t->afree = t->atid_tab;
3475 }
3476 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3477 /* Reserve stid 0 for T4/T5 adapters */
3478 if (!t->stid_base &&
3479 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3480 __set_bit(0, t->stid_bmap);
3481
3482 return 0;
3483 }
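/*
 * Editorial sketch of how tid_init() carves up its single t4_alloc_mem()
 * block (this mirrors the size computation above):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] | ftid_tab[nftids + nsftids]
 */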
3484
3485 int cxgb4_clip_get(const struct net_device *dev,
3486 const struct in6_addr *lip)
3487 {
3488 struct adapter *adap;
3489 struct fw_clip_cmd c;
3490
3491 adap = netdev2adap(dev);
3492 memset(&c, 0, sizeof(c));
3493 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3494 FW_CMD_REQUEST | FW_CMD_WRITE);
3495 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3496 c.ip_hi = *(__be64 *)(lip->s6_addr);
3497 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3498 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3499 }
3500 EXPORT_SYMBOL(cxgb4_clip_get);
3501
3502 int cxgb4_clip_release(const struct net_device *dev,
3503 const struct in6_addr *lip)
3504 {
3505 struct adapter *adap;
3506 struct fw_clip_cmd c;
3507
3508 adap = netdev2adap(dev);
3509 memset(&c, 0, sizeof(c));
3510 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3511 FW_CMD_REQUEST | FW_CMD_READ);
3512 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3513 c.ip_hi = *(__be64 *)(lip->s6_addr);
3514 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3515 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3516 }
3517 EXPORT_SYMBOL(cxgb4_clip_release);
3518
3519 /**
3520 * cxgb4_create_server - create an IP server
3521 * @dev: the device
3522 * @stid: the server TID
3523 * @sip: local IP address to bind server to
3524 * @sport: the server's TCP port
* @vlan: VLAN tag (not used by this routine)
3525 * @queue: queue to direct messages from this server to
3526 *
3527 * Create an IP server for the given port and address.
3528 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3529 */
3530 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3531 __be32 sip, __be16 sport, __be16 vlan,
3532 unsigned int queue)
3533 {
3534 unsigned int chan;
3535 struct sk_buff *skb;
3536 struct adapter *adap;
3537 struct cpl_pass_open_req *req;
3538 int ret;
3539
3540 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3541 if (!skb)
3542 return -ENOMEM;
3543
3544 adap = netdev2adap(dev);
3545 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3546 INIT_TP_WR(req, 0);
3547 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3548 req->local_port = sport;
3549 req->peer_port = htons(0);
3550 req->local_ip = sip;
3551 req->peer_ip = htonl(0);
3552 chan = rxq_to_chan(&adap->sge, queue);
3553 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3554 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3555 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3556 ret = t4_mgmt_tx(adap, skb);
3557 return net_xmit_eval(ret);
3558 }
3559 EXPORT_SYMBOL(cxgb4_create_server);
3560
3561 /* cxgb4_create_server6 - create an IPv6 server
3562 * @dev: the device
3563 * @stid: the server TID
3564 * @sip: local IPv6 address to bind server to
3565 * @sport: the server's TCP port
3566 * @queue: queue to direct messages from this server to
3567 *
3568 * Create an IPv6 server for the given port and address.
3569 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3570 */
3571 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3572 const struct in6_addr *sip, __be16 sport,
3573 unsigned int queue)
3574 {
3575 unsigned int chan;
3576 struct sk_buff *skb;
3577 struct adapter *adap;
3578 struct cpl_pass_open_req6 *req;
3579 int ret;
3580
3581 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3582 if (!skb)
3583 return -ENOMEM;
3584
3585 adap = netdev2adap(dev);
3586 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3587 INIT_TP_WR(req, 0);
3588 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3589 req->local_port = sport;
3590 req->peer_port = htons(0);
3591 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3592 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3593 req->peer_ip_hi = cpu_to_be64(0);
3594 req->peer_ip_lo = cpu_to_be64(0);
3595 chan = rxq_to_chan(&adap->sge, queue);
3596 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3597 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3598 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3599 ret = t4_mgmt_tx(adap, skb);
3600 return net_xmit_eval(ret);
3601 }
3602 EXPORT_SYMBOL(cxgb4_create_server6);
3603
3604 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3605 unsigned int queue, bool ipv6)
3606 {
3607 struct sk_buff *skb;
3608 struct adapter *adap;
3609 struct cpl_close_listsvr_req *req;
3610 int ret;
3611
3612 adap = netdev2adap(dev);
3613
3614 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3615 if (!skb)
3616 return -ENOMEM;
3617
3618 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3619 INIT_TP_WR(req, 0);
3620 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3621 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3622 LISTSVR_IPV6(0)) | QUEUENO(queue));
3623 ret = t4_mgmt_tx(adap, skb);
3624 return net_xmit_eval(ret);
3625 }
3626 EXPORT_SYMBOL(cxgb4_remove_server);
3627
3628 /**
3629 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3630 * @mtus: the HW MTU table
3631 * @mtu: the target MTU
3632 * @idx: index of selected entry in the MTU table
3633 *
3634 * Returns the index and the value in the HW MTU table that is closest to
3635 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3636 * table, in which case that smallest available value is selected.
3637 */
3638 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3639 unsigned int *idx)
3640 {
3641 unsigned int i = 0;
3642
3643 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3644 ++i;
3645 if (idx)
3646 *idx = i;
3647 return mtus[i];
3648 }
3649 EXPORT_SYMBOL(cxgb4_best_mtu);
3650
3651 /**
3652 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3653 * @mtus: the HW MTU table
3654 * @header_size: Header Size
3655 * @data_size_max: maximum Data Segment Size
3656 * @data_size_align: desired Data Segment Size Alignment (2^N)
3657 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3658 *
3659 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3660 * MTU Table based solely on a Maximum MTU parameter, we break that
3661 * parameter up into a Header Size and Maximum Data Segment Size, and
3662 * provide a desired Data Segment Size Alignment. If we find an MTU in
3663 * the Hardware MTU Table which will result in a Data Segment Size with
3664 * the requested alignment _and_ that MTU isn't "too far" from the
3665 * closest MTU, then we'll return that rather than the closest MTU.
3666 */
3667 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3668 unsigned short header_size,
3669 unsigned short data_size_max,
3670 unsigned short data_size_align,
3671 unsigned int *mtu_idxp)
3672 {
3673 unsigned short max_mtu = header_size + data_size_max;
3674 unsigned short data_size_align_mask = data_size_align - 1;
3675 int mtu_idx, aligned_mtu_idx;
3676
3677 /* Scan the MTU Table till we find an MTU which is larger than our
3678 * Maximum MTU or we reach the end of the table. Along the way,
3679 * record the last MTU found, if any, which will result in a Data
3680 * Segment Length matching the requested alignment.
3681 */
3682 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3683 unsigned short data_size = mtus[mtu_idx] - header_size;
3684
3685 /* If this MTU minus the Header Size would result in a
3686 * Data Segment Size of the desired alignment, remember it.
3687 */
3688 if ((data_size & data_size_align_mask) == 0)
3689 aligned_mtu_idx = mtu_idx;
3690
3691 /* If we're not at the end of the Hardware MTU Table and the
3692 * next element is larger than our Maximum MTU, drop out of
3693 * the loop.
3694 */
3695 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3696 break;
3697 }
3698
3699 /* If we fell out of the loop because we ran to the end of the table,
3700 * then we just have to use the last [largest] entry.
3701 */
3702 if (mtu_idx == NMTUS)
3703 mtu_idx--;
3704
3705 /* If we found an MTU which resulted in the requested Data Segment
3706 * Length alignment and that's "not far" from the largest MTU which is
3707 * less than or equal to the maximum MTU, then use that.
3708 */
3709 if (aligned_mtu_idx >= 0 &&
3710 mtu_idx - aligned_mtu_idx <= 1)
3711 mtu_idx = aligned_mtu_idx;
3712
3713 /* If the caller has passed in an MTU Index pointer, pass the
3714 * MTU Index back. Return the MTU value.
3715 */
3716 if (mtu_idxp)
3717 *mtu_idxp = mtu_idx;
3718 return mtus[mtu_idx];
3719 }
3720 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
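/*
 * Worked example for cxgb4_best_aligned_mtu() (values are hypothetical and
 * for illustration only): with header_size = 40, data_size_max = 4096 (so
 * max_mtu = 4136), data_size_align = 512 and an MTU table containing
 * ..., 2088, 4096, 4352, ...:
 *
 *	4096 is the largest MTU not exceeding 4136 but leaves an unaligned
 *	4056-byte data segment; 2088 is only one entry earlier and yields a
 *	512-aligned 2048-byte data segment, so 2088 is returned.
 *
 * A call might look like this (assuming the adapter's MTU table lives at
 * adap->params.mtus):
 *
 *	unsigned int mtu_idx;
 *	unsigned int mtu = cxgb4_best_aligned_mtu(adap->params.mtus, 40,
 *						  4096, 512, &mtu_idx);
 */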
3721
3722 /**
3723 * cxgb4_port_chan - get the HW channel of a port
3724 * @dev: the net device for the port
3725 *
3726 * Return the HW Tx channel of the given port.
3727 */
3728 unsigned int cxgb4_port_chan(const struct net_device *dev)
3729 {
3730 return netdev2pinfo(dev)->tx_chan;
3731 }
3732 EXPORT_SYMBOL(cxgb4_port_chan);
3733
3734 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3735 {
3736 struct adapter *adap = netdev2adap(dev);
3737 u32 v1, v2, lp_count, hp_count;
3738
3739 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3740 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3741 if (is_t4(adap->params.chip)) {
3742 lp_count = G_LP_COUNT(v1);
3743 hp_count = G_HP_COUNT(v1);
3744 } else {
3745 lp_count = G_LP_COUNT_T5(v1);
3746 hp_count = G_HP_COUNT_T5(v2);
3747 }
3748 return lpfifo ? lp_count : hp_count;
3749 }
3750 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3751
3752 /**
3753 * cxgb4_port_viid - get the VI id of a port
3754 * @dev: the net device for the port
3755 *
3756 * Return the VI id of the given port.
3757 */
3758 unsigned int cxgb4_port_viid(const struct net_device *dev)
3759 {
3760 return netdev2pinfo(dev)->viid;
3761 }
3762 EXPORT_SYMBOL(cxgb4_port_viid);
3763
3764 /**
3765 * cxgb4_port_idx - get the index of a port
3766 * @dev: the net device for the port
3767 *
3768 * Return the index of the given port.
3769 */
3770 unsigned int cxgb4_port_idx(const struct net_device *dev)
3771 {
3772 return netdev2pinfo(dev)->port_id;
3773 }
3774 EXPORT_SYMBOL(cxgb4_port_idx);
3775
3776 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3777 struct tp_tcp_stats *v6)
3778 {
3779 struct adapter *adap = pci_get_drvdata(pdev);
3780
3781 spin_lock(&adap->stats_lock);
3782 t4_tp_get_tcp_stats(adap, v4, v6);
3783 spin_unlock(&adap->stats_lock);
3784 }
3785 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3786
3787 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3788 const unsigned int *pgsz_order)
3789 {
3790 struct adapter *adap = netdev2adap(dev);
3791
3792 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3793 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3794 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3795 HPZ3(pgsz_order[3]));
3796 }
3797 EXPORT_SYMBOL(cxgb4_iscsi_init);
3798
3799 int cxgb4_flush_eq_cache(struct net_device *dev)
3800 {
3801 struct adapter *adap = netdev2adap(dev);
3802 int ret;
3803
3804 ret = t4_fwaddrspace_write(adap, adap->mbox,
3805 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3806 return ret;
3807 }
3808 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3809
3810 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3811 {
3812 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3813 __be64 indices;
3814 int ret;
3815
3816 spin_lock(&adap->win0_lock);
3817 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3818 sizeof(indices), (__be32 *)&indices,
3819 T4_MEMORY_READ);
3820 spin_unlock(&adap->win0_lock);
3821 if (!ret) {
/* cidx and pidx are 16-bit fields at bit offsets 25 and 9 of the
 * queue's doorbell context word
 */
3822 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3823 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3824 }
3825 return ret;
3826 }
3827
3828 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3829 u16 size)
3830 {
3831 struct adapter *adap = netdev2adap(dev);
3832 u16 hw_pidx, hw_cidx;
3833 int ret;
3834
3835 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3836 if (ret)
3837 goto out;
3838
3839 if (pidx != hw_pidx) {
3840 u16 delta;
3841
3842 if (pidx >= hw_pidx)
3843 delta = pidx - hw_pidx;
3844 else
3845 delta = size - hw_pidx + pidx;
3846 wmb();
3847 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3848 QID(qid) | PIDX(delta));
3849 }
3850 out:
3851 return ret;
3852 }
3853 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3854
3855 void cxgb4_disable_db_coalescing(struct net_device *dev)
3856 {
3857 struct adapter *adap;
3858
3859 adap = netdev2adap(dev);
3860 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3861 F_NOCOALESCE);
3862 }
3863 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3864
3865 void cxgb4_enable_db_coalescing(struct net_device *dev)
3866 {
3867 struct adapter *adap;
3868
3869 adap = netdev2adap(dev);
3870 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3871 }
3872 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3873
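/**
 * cxgb4_read_tpte - read a TPT entry from adapter memory
 * @dev: the net device for the port
 * @stag: the stag identifying the TPT entry
 * @tpte: holds the 32 bytes that are read
 *
 * Translate @stag into an offset within the adapter's stag region, work out
 * which memory (EDC0, EDC1, MC0 or MC1) that offset lands in and read the
 * 32-byte entry from there.
 */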
3874 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3875 {
3876 struct adapter *adap;
3877 u32 offset, memtype, memaddr;
3878 u32 edc0_size, edc1_size, mc0_size, mc1_size;
3879 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3880 int ret;
3881
3882 adap = netdev2adap(dev);
3883
3884 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3885
3886 /* Figure out where the offset lands in the Memory Type/Address scheme.
3887 * This code assumes that the memory is laid out starting at offset 0
3888 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3889 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3890 * MC0, and some have both MC0 and MC1.
3891 */
3892 edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
3893 edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
3894 mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
3895
3896 edc0_end = edc0_size;
3897 edc1_end = edc0_end + edc1_size;
3898 mc0_end = edc1_end + mc0_size;
3899
3900 if (offset < edc0_end) {
3901 memtype = MEM_EDC0;
3902 memaddr = offset;
3903 } else if (offset < edc1_end) {
3904 memtype = MEM_EDC1;
3905 memaddr = offset - edc0_end;
3906 } else {
3907 if (offset < mc0_end) {
3908 memtype = MEM_MC0;
3909 memaddr = offset - edc1_end;
3910 } else if (is_t4(adap->params.chip)) {
3911 /* T4 only has a single memory channel */
3912 goto err;
3913 } else {
3914 mc1_size = EXT_MEM_SIZE_GET(
3915 t4_read_reg(adap,
3916 MA_EXT_MEMORY1_BAR)) << 20;
3917 mc1_end = mc0_end + mc1_size;
3918 if (offset < mc1_end) {
3919 memtype = MEM_MC1;
3920 memaddr = offset - mc0_end;
3921 } else {
3922 /* offset beyond the end of any memory */
3923 goto err;
3924 }
3925 }
3926 }
3927
3928 spin_lock(&adap->win0_lock);
3929 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3930 spin_unlock(&adap->win0_lock);
3931 return ret;
3932
3933 err:
3934 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3935 stag, offset);
3936 return -EINVAL;
3937 }
3938 EXPORT_SYMBOL(cxgb4_read_tpte);
3939
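/**
 * cxgb4_read_sge_timestamp - read the current SGE timestamp
 * @dev: the net device for the port
 *
 * Combine the SGE timestamp low and high registers into a 64-bit value.
 */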
3940 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3941 {
3942 u32 hi, lo;
3943 struct adapter *adap;
3944
3945 adap = netdev2adap(dev);
3946 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3947 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3948
3949 return ((u64)hi << 32) | (u64)lo;
3950 }
3951 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3952
3953 static struct pci_driver cxgb4_driver;
3954
3955 static void check_neigh_update(struct neighbour *neigh)
3956 {
3957 const struct device *parent;
3958 const struct net_device *netdev = neigh->dev;
3959
3960 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3961 netdev = vlan_dev_real_dev(netdev);
3962 parent = netdev->dev.parent;
3963 if (parent && parent->driver == &cxgb4_driver.driver)
3964 t4_l2t_update(dev_get_drvdata(parent), neigh);
3965 }
3966
3967 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3968 void *data)
3969 {
3970 switch (event) {
3971 case NETEVENT_NEIGH_UPDATE:
3972 check_neigh_update(data);
3973 break;
3974 case NETEVENT_REDIRECT:
3975 default:
3976 break;
3977 }
3978 return 0;
3979 }
3980
3981 static bool netevent_registered;
3982 static struct notifier_block cxgb4_netevent_nb = {
3983 .notifier_call = netevent_cb
3984 };
3985
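/* Poll until both the low- and high-priority doorbell FIFOs are empty,
 * sleeping for @usecs microseconds between polls.
 */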
3986 static void drain_db_fifo(struct adapter *adap, int usecs)
3987 {
3988 u32 v1, v2, lp_count, hp_count;
3989
3990 do {
3991 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3992 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3993 if (is_t4(adap->params.chip)) {
3994 lp_count = G_LP_COUNT(v1);
3995 hp_count = G_HP_COUNT(v1);
3996 } else {
3997 lp_count = G_LP_COUNT_T5(v1);
3998 hp_count = G_HP_COUNT_T5(v2);
3999 }
4000
4001 if (lp_count == 0 && hp_count == 0)
4002 break;
4003 set_current_state(TASK_UNINTERRUPTIBLE);
4004 schedule_timeout(usecs_to_jiffies(usecs));
4005 } while (1);
4006 }
4007
4008 static void disable_txq_db(struct sge_txq *q)
4009 {
4010 unsigned long flags;
4011
4012 spin_lock_irqsave(&q->db_lock, flags);
4013 q->db_disabled = 1;
4014 spin_unlock_irqrestore(&q->db_lock, flags);
4015 }
4016
4017 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
4018 {
4019 spin_lock_irq(&q->db_lock);
4020 if (q->db_pidx_inc) {
4021 /* Make sure that all writes to the TX descriptors
4022 * are committed before we tell HW about them.
4023 */
4024 wmb();
4025 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4026 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
4027 q->db_pidx_inc = 0;
4028 }
4029 q->db_disabled = 0;
4030 spin_unlock_irq(&q->db_lock);
4031 }
4032
4033 static void disable_dbs(struct adapter *adap)
4034 {
4035 int i;
4036
4037 for_each_ethrxq(&adap->sge, i)
4038 disable_txq_db(&adap->sge.ethtxq[i].q);
4039 for_each_ofldrxq(&adap->sge, i)
4040 disable_txq_db(&adap->sge.ofldtxq[i].q);
4041 for_each_port(adap, i)
4042 disable_txq_db(&adap->sge.ctrlq[i].q);
4043 }
4044
4045 static void enable_dbs(struct adapter *adap)
4046 {
4047 int i;
4048
4049 for_each_ethrxq(&adap->sge, i)
4050 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
4051 for_each_ofldrxq(&adap->sge, i)
4052 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
4053 for_each_port(adap, i)
4054 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
4055 }
4056
4057 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
4058 {
4059 if (adap->uld_handle[CXGB4_ULD_RDMA])
4060 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
4061 cmd);
4062 }
4063
4064 static void process_db_full(struct work_struct *work)
4065 {
4066 struct adapter *adap;
4067
4068 adap = container_of(work, struct adapter, db_full_task);
4069
4070 drain_db_fifo(adap, dbfifo_drain_delay);
4071 enable_dbs(adap);
4072 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4073 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4074 DBFIFO_HP_INT | DBFIFO_LP_INT,
4075 DBFIFO_HP_INT | DBFIFO_LP_INT);
4076 }
4077
4078 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4079 {
4080 u16 hw_pidx, hw_cidx;
4081 int ret;
4082
4083 spin_lock_irq(&q->db_lock);
4084 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4085 if (ret)
4086 goto out;
4087 if (q->db_pidx != hw_pidx) {
4088 u16 delta;
4089
4090 if (q->db_pidx >= hw_pidx)
4091 delta = q->db_pidx - hw_pidx;
4092 else
4093 delta = q->size - hw_pidx + q->db_pidx;
4094 wmb();
4095 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4096 QID(q->cntxt_id) | PIDX(delta));
4097 }
4098 out:
4099 q->db_disabled = 0;
4100 q->db_pidx_inc = 0;
4101 spin_unlock_irq(&q->db_lock);
4102 if (ret)
4103 CH_WARN(adap, "DB drop recovery failed.\n");
4104 }
4105 static void recover_all_queues(struct adapter *adap)
4106 {
4107 int i;
4108
4109 for_each_ethrxq(&adap->sge, i)
4110 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4111 for_each_ofldrxq(&adap->sge, i)
4112 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4113 for_each_port(adap, i)
4114 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4115 }
4116
4117 static void process_db_drop(struct work_struct *work)
4118 {
4119 struct adapter *adap;
4120
4121 adap = container_of(work, struct adapter, db_drop_task);
4122
4123 if (is_t4(adap->params.chip)) {
4124 drain_db_fifo(adap, dbfifo_drain_delay);
4125 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4126 drain_db_fifo(adap, dbfifo_drain_delay);
4127 recover_all_queues(adap);
4128 drain_db_fifo(adap, dbfifo_drain_delay);
4129 enable_dbs(adap);
4130 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4131 } else {
4132 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4133 u16 qid = (dropped_db >> 15) & 0x1ffff;
4134 u16 pidx_inc = dropped_db & 0x1fff;
4135 unsigned int s_qpp;
4136 unsigned short udb_density;
4137 unsigned long qpshift;
4138 int page;
4139 u32 udb;
4140
4141 dev_warn(adap->pdev_dev,
4142 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4143 dropped_db, qid,
4144 (dropped_db >> 14) & 1,
4145 (dropped_db >> 13) & 1,
4146 pidx_inc);
4147
4148 drain_db_fifo(adap, 1);
4149
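/* Recompute the BAR2 user doorbell address of the dropped queue from
 * the egress queues-per-page setting and ring that doorbell with the
 * lost producer-index increment.
 */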
4150 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4151 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4152 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4153 qpshift = PAGE_SHIFT - ilog2(udb_density);
4154 udb = qid << qpshift;
4155 udb &= PAGE_MASK;
4156 page = udb / PAGE_SIZE;
4157 udb += (qid - (page * udb_density)) * 128;
4158
4159 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4160
4161 /* Re-enable BAR2 WC */
4162 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4163 }
4164
4165 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4166 }
4167
4168 void t4_db_full(struct adapter *adap)
4169 {
4170 if (is_t4(adap->params.chip)) {
4171 disable_dbs(adap);
4172 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4173 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4174 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4175 queue_work(adap->workq, &adap->db_full_task);
4176 }
4177 }
4178
4179 void t4_db_dropped(struct adapter *adap)
4180 {
4181 if (is_t4(adap->params.chip)) {
4182 disable_dbs(adap);
4183 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4184 }
4185 queue_work(adap->workq, &adap->db_drop_task);
4186 }
4187
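/* Fill in a cxgb4_lld_info structure describing this adapter and hand it to
 * the given upper-layer driver's add() method, recording the handle the ULD
 * returns.
 */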
4188 static void uld_attach(struct adapter *adap, unsigned int uld)
4189 {
4190 void *handle;
4191 struct cxgb4_lld_info lli;
4192 unsigned short i;
4193
4194 lli.pdev = adap->pdev;
4195 lli.pf = adap->fn;
4196 lli.l2t = adap->l2t;
4197 lli.tids = &adap->tids;
4198 lli.ports = adap->port;
4199 lli.vr = &adap->vres;
4200 lli.mtus = adap->params.mtus;
4201 if (uld == CXGB4_ULD_RDMA) {
4202 lli.rxq_ids = adap->sge.rdma_rxq;
4203 lli.ciq_ids = adap->sge.rdma_ciq;
4204 lli.nrxq = adap->sge.rdmaqs;
4205 lli.nciq = adap->sge.rdmaciqs;
4206 } else if (uld == CXGB4_ULD_ISCSI) {
4207 lli.rxq_ids = adap->sge.ofld_rxq;
4208 lli.nrxq = adap->sge.ofldqsets;
4209 }
4210 lli.ntxq = adap->sge.ofldqsets;
4211 lli.nchan = adap->params.nports;
4212 lli.nports = adap->params.nports;
4213 lli.wr_cred = adap->params.ofldq_wr_cred;
4214 lli.adapter_type = adap->params.chip;
4215 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4216 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4217 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4218 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4219 (adap->fn * 4));
4220 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4221 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4222 (adap->fn * 4));
4223 lli.filt_mode = adap->params.tp.vlan_pri_map;
4224 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4225 for (i = 0; i < NCHAN; i++)
4226 lli.tx_modq[i] = i;
4227 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4228 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4229 lli.fw_vers = adap->params.fw_vers;
4230 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4231 lli.sge_ingpadboundary = adap->sge.fl_align;
4232 lli.sge_egrstatuspagesize = adap->sge.stat_len;
4233 lli.sge_pktshift = adap->sge.pktshift;
4234 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4235 lli.max_ordird_qp = adap->params.max_ordird_qp;
4236 lli.max_ird_adapter = adap->params.max_ird_adapter;
4237 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4238
4239 handle = ulds[uld].add(&lli);
4240 if (IS_ERR(handle)) {
4241 dev_warn(adap->pdev_dev,
4242 "could not attach to the %s driver, error %ld\n",
4243 uld_str[uld], PTR_ERR(handle));
4244 return;
4245 }
4246
4247 adap->uld_handle[uld] = handle;
4248
4249 if (!netevent_registered) {
4250 register_netevent_notifier(&cxgb4_netevent_nb);
4251 netevent_registered = true;
4252 }
4253
4254 if (adap->flags & FULL_INIT_DONE)
4255 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4256 }
4257
4258 static void attach_ulds(struct adapter *adap)
4259 {
4260 unsigned int i;
4261
4262 spin_lock(&adap_rcu_lock);
4263 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4264 spin_unlock(&adap_rcu_lock);
4265
4266 mutex_lock(&uld_mutex);
4267 list_add_tail(&adap->list_node, &adapter_list);
4268 for (i = 0; i < CXGB4_ULD_MAX; i++)
4269 if (ulds[i].add)
4270 uld_attach(adap, i);
4271 mutex_unlock(&uld_mutex);
4272 }
4273
4274 static void detach_ulds(struct adapter *adap)
4275 {
4276 unsigned int i;
4277
4278 mutex_lock(&uld_mutex);
4279 list_del(&adap->list_node);
4280 for (i = 0; i < CXGB4_ULD_MAX; i++)
4281 if (adap->uld_handle[i]) {
4282 ulds[i].state_change(adap->uld_handle[i],
4283 CXGB4_STATE_DETACH);
4284 adap->uld_handle[i] = NULL;
4285 }
4286 if (netevent_registered && list_empty(&adapter_list)) {
4287 unregister_netevent_notifier(&cxgb4_netevent_nb);
4288 netevent_registered = false;
4289 }
4290 mutex_unlock(&uld_mutex);
4291
4292 spin_lock(&adap_rcu_lock);
4293 list_del_rcu(&adap->rcu_node);
4294 spin_unlock(&adap_rcu_lock);
4295 }
4296
4297 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4298 {
4299 unsigned int i;
4300
4301 mutex_lock(&uld_mutex);
4302 for (i = 0; i < CXGB4_ULD_MAX; i++)
4303 if (adap->uld_handle[i])
4304 ulds[i].state_change(adap->uld_handle[i], new_state);
4305 mutex_unlock(&uld_mutex);
4306 }
4307
4308 /**
4309 * cxgb4_register_uld - register an upper-layer driver
4310 * @type: the ULD type
4311 * @p: the ULD methods
4312 *
4313 * Registers an upper-layer driver with this driver and notifies the ULD
4314 * about any presently available devices that support its type. Returns
4315 * %-EBUSY if a ULD of the same type is already registered.
4316 */
4317 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4318 {
4319 int ret = 0;
4320 struct adapter *adap;
4321
4322 if (type >= CXGB4_ULD_MAX)
4323 return -EINVAL;
4324 mutex_lock(&uld_mutex);
4325 if (ulds[type].add) {
4326 ret = -EBUSY;
4327 goto out;
4328 }
4329 ulds[type] = *p;
4330 list_for_each_entry(adap, &adapter_list, list_node)
4331 uld_attach(adap, type);
4332 out: mutex_unlock(&uld_mutex);
4333 return ret;
4334 }
4335 EXPORT_SYMBOL(cxgb4_register_uld);
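
/* Illustrative sketch only (not part of this driver): how a hypothetical
 * upper-layer driver might register itself. The handler names below are
 * made up; only the methods this file actually invokes (.add, .state_change,
 * .control) are shown.
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lli)
 *	{
 *		return my_handle;	(or an ERR_PTR() on failure)
 *	}
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_uld_add,
 *		.state_change	= my_uld_state_change,
 *		.control	= my_uld_control,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 */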
4336
4337 /**
4338 * cxgb4_unregister_uld - unregister an upper-layer driver
4339 * @type: the ULD type
4340 *
4341 * Unregisters an existing upper-layer driver.
4342 */
4343 int cxgb4_unregister_uld(enum cxgb4_uld type)
4344 {
4345 struct adapter *adap;
4346
4347 if (type >= CXGB4_ULD_MAX)
4348 return -EINVAL;
4349 mutex_lock(&uld_mutex);
4350 list_for_each_entry(adap, &adapter_list, list_node)
4351 adap->uld_handle[type] = NULL;
4352 ulds[type].add = NULL;
4353 mutex_unlock(&uld_mutex);
4354 return 0;
4355 }
4356 EXPORT_SYMBOL(cxgb4_unregister_uld);
4357
4358 /* Check if the netdev on which the event occurred belongs to us. Return
4359 * success (true) if it does, failure (false) otherwise.
4360 * Called with rcu_read_lock() held.
4361 */
4362 static bool cxgb4_netdev(const struct net_device *netdev)
4363 {
4364 struct adapter *adap;
4365 int i;
4366
4367 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4368 for (i = 0; i < MAX_NPORTS; i++)
4369 if (adap->port[i] == netdev)
4370 return true;
4371 return false;
4372 }
4373
4374 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4375 unsigned long event)
4376 {
4377 int ret = NOTIFY_DONE;
4378
4379 rcu_read_lock();
4380 if (cxgb4_netdev(event_dev)) {
4381 switch (event) {
4382 case NETDEV_UP:
4383 ret = cxgb4_clip_get(event_dev,
4384 (const struct in6_addr *)ifa->addr.s6_addr);
4385 if (ret < 0) {
4386 rcu_read_unlock();
4387 return ret;
4388 }
4389 ret = NOTIFY_OK;
4390 break;
4391 case NETDEV_DOWN:
4392 cxgb4_clip_release(event_dev,
4393 (const struct in6_addr *)ifa->addr.s6_addr);
4394 ret = NOTIFY_OK;
4395 break;
4396 default:
4397 break;
4398 }
4399 }
4400 rcu_read_unlock();
4401 return ret;
4402 }
4403
4404 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4405 unsigned long event, void *data)
4406 {
4407 struct inet6_ifaddr *ifa = data;
4408 struct net_device *event_dev;
4409 int ret = NOTIFY_DONE;
4410 struct bonding *bond = netdev_priv(ifa->idev->dev);
4411 struct list_head *iter;
4412 struct slave *slave;
4413 struct pci_dev *first_pdev = NULL;
4414
4415 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4416 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4417 ret = clip_add(event_dev, ifa, event);
4418 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4419 /* It is possible that two different adapters are bonded in one
4420 * bond. We need to find each such adapter and add the CLIP
4421 * entry on each of them, but only once.
4422 */
4423 bond_for_each_slave(bond, slave, iter) {
4424 if (!first_pdev) {
4425 ret = clip_add(slave->dev, ifa, event);
4426 /* Only initialize first_pdev if clip_add succeeds,
4427 * since that means the device is ours.
4428 */
4429 if (ret == NOTIFY_OK)
4430 first_pdev = to_pci_dev(
4431 slave->dev->dev.parent);
4432 } else if (first_pdev !=
4433 to_pci_dev(slave->dev->dev.parent))
4434 ret = clip_add(slave->dev, ifa, event);
4435 }
4436 } else
4437 ret = clip_add(ifa->idev->dev, ifa, event);
4438
4439 return ret;
4440 }
4441
4442 static struct notifier_block cxgb4_inet6addr_notifier = {
4443 .notifier_call = cxgb4_inet6addr_handler
4444 };
4445
4446 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4447 * a physical device.
4448 * The physical device reference is needed to send the actual CLIP command.
4449 */
4450 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4451 {
4452 struct inet6_dev *idev = NULL;
4453 struct inet6_ifaddr *ifa;
4454 int ret = 0;
4455
4456 idev = __in6_dev_get(root_dev);
4457 if (!idev)
4458 return ret;
4459
4460 read_lock_bh(&idev->lock);
4461 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4462 ret = cxgb4_clip_get(dev,
4463 (const struct in6_addr *)ifa->addr.s6_addr);
4464 if (ret < 0)
4465 break;
4466 }
4467 read_unlock_bh(&idev->lock);
4468
4469 return ret;
4470 }
4471
4472 static int update_root_dev_clip(struct net_device *dev)
4473 {
4474 struct net_device *root_dev = NULL;
4475 int i, ret = 0;
4476
4477 /* First populate the real net device's IPv6 addresses */
4478 ret = update_dev_clip(dev, dev);
4479 if (ret)
4480 return ret;
4481
4482 /* Parse all bond and vlan devices layered on top of the physical dev */
4483 for (i = 0; i < VLAN_N_VID; i++) {
4484 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4485 if (!root_dev)
4486 continue;
4487
4488 ret = update_dev_clip(root_dev, dev);
4489 if (ret)
4490 break;
4491 }
4492 return ret;
4493 }
4494
4495 static void update_clip(const struct adapter *adap)
4496 {
4497 int i;
4498 struct net_device *dev;
4499 int ret;
4500
4501 rcu_read_lock();
4502
4503 for (i = 0; i < MAX_NPORTS; i++) {
4504 dev = adap->port[i];
4505 ret = 0;
4506
4507 if (dev)
4508 ret = update_root_dev_clip(dev);
4509
4510 if (ret < 0)
4511 break;
4512 }
4513 rcu_read_unlock();
4514 }
4515
4516 /**
4517 * cxgb_up - enable the adapter
4518 * @adap: adapter being enabled
4519 *
4520 * Called when the first port is enabled, this function performs the
4521 * actions necessary to make an adapter operational, such as completing
4522 * the initialization of HW modules, and enabling interrupts.
4523 *
4524 * Must be called with the rtnl lock held.
4525 */
4526 static int cxgb_up(struct adapter *adap)
4527 {
4528 int err;
4529
4530 err = setup_sge_queues(adap);
4531 if (err)
4532 goto out;
4533 err = setup_rss(adap);
4534 if (err)
4535 goto freeq;
4536
4537 if (adap->flags & USING_MSIX) {
4538 name_msix_vecs(adap);
4539 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4540 adap->msix_info[0].desc, adap);
4541 if (err)
4542 goto irq_err;
4543
4544 err = request_msix_queue_irqs(adap);
4545 if (err) {
4546 free_irq(adap->msix_info[0].vec, adap);
4547 goto irq_err;
4548 }
4549 } else {
4550 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4551 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4552 adap->port[0]->name, adap);
4553 if (err)
4554 goto irq_err;
4555 }
4556 enable_rx(adap);
4557 t4_sge_start(adap);
4558 t4_intr_enable(adap);
4559 adap->flags |= FULL_INIT_DONE;
4560 notify_ulds(adap, CXGB4_STATE_UP);
4561 update_clip(adap);
4562 out:
4563 return err;
4564 irq_err:
4565 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4566 freeq:
4567 t4_free_sge_resources(adap);
4568 goto out;
4569 }
4570
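/* Reverse of cxgb_up: disable interrupts, quiesce and stop the SGE, release
 * the adapter's IRQs and free the SGE queue resources.
 */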
4571 static void cxgb_down(struct adapter *adapter)
4572 {
4573 t4_intr_disable(adapter);
4574 cancel_work_sync(&adapter->tid_release_task);
4575 cancel_work_sync(&adapter->db_full_task);
4576 cancel_work_sync(&adapter->db_drop_task);
4577 adapter->tid_release_task_busy = false;
4578 adapter->tid_release_head = NULL;
4579
4580 if (adapter->flags & USING_MSIX) {
4581 free_msix_queue_irqs(adapter);
4582 free_irq(adapter->msix_info[0].vec, adapter);
4583 } else
4584 free_irq(adapter->pdev->irq, adapter);
4585 quiesce_rx(adapter);
4586 t4_sge_stop(adapter);
4587 t4_free_sge_resources(adapter);
4588 adapter->flags &= ~FULL_INIT_DONE;
4589 }
4590
4591 /*
4592 * net_device operations
4593 */
4594 static int cxgb_open(struct net_device *dev)
4595 {
4596 int err;
4597 struct port_info *pi = netdev_priv(dev);
4598 struct adapter *adapter = pi->adapter;
4599
4600 netif_carrier_off(dev);
4601
4602 if (!(adapter->flags & FULL_INIT_DONE)) {
4603 err = cxgb_up(adapter);
4604 if (err < 0)
4605 return err;
4606 }
4607
4608 err = link_start(dev);
4609 if (!err)
4610 netif_tx_start_all_queues(dev);
4611 return err;
4612 }
4613
4614 static int cxgb_close(struct net_device *dev)
4615 {
4616 struct port_info *pi = netdev_priv(dev);
4617 struct adapter *adapter = pi->adapter;
4618
4619 netif_tx_stop_all_queues(dev);
4620 netif_carrier_off(dev);
4621 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4622 }
4623
4624 /* Return an error number if the indicated filter isn't writable ...
4625 */
4626 static int writable_filter(struct filter_entry *f)
4627 {
4628 if (f->locked)
4629 return -EPERM;
4630 if (f->pending)
4631 return -EBUSY;
4632
4633 return 0;
4634 }
4635
4636 /* Delete the filter at the specified index (if valid). This checks for all
4637 * the common problems with doing this, such as the filter being locked or
4638 * currently pending in another operation.
4639 */
4640 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4641 {
4642 struct filter_entry *f;
4643 int ret;
4644
4645 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4646 return -EINVAL;
4647
4648 f = &adapter->tids.ftid_tab[fidx];
4649 ret = writable_filter(f);
4650 if (ret)
4651 return ret;
4652 if (f->valid)
4653 return del_filter_wr(adapter, fidx);
4654
4655 return 0;
4656 }
4657
4658 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4659 __be32 sip, __be16 sport, __be16 vlan,
4660 unsigned int queue, unsigned char port, unsigned char mask)
4661 {
4662 int ret;
4663 struct filter_entry *f;
4664 struct adapter *adap;
4665 int i;
4666 u8 *val;
4667
4668 adap = netdev2adap(dev);
4669
4670 /* Adjust stid to correct filter index */
4671 stid -= adap->tids.sftid_base;
4672 stid += adap->tids.nftids;
4673
4674 /* Check to make sure the filter requested is writable ...
4675 */
4676 f = &adap->tids.ftid_tab[stid];
4677 ret = writable_filter(f);
4678 if (ret)
4679 return ret;
4680
4681 /* Clear out any old resources being used by the filter before
4682 * we start constructing the new filter.
4683 */
4684 if (f->valid)
4685 clear_filter(adap, f);
4686
4687 /* Clear out filter specifications */
4688 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4689 f->fs.val.lport = cpu_to_be16(sport);
4690 f->fs.mask.lport = ~0;
4691 val = (u8 *)&sip;
4692 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4693 for (i = 0; i < 4; i++) {
4694 f->fs.val.lip[i] = val[i];
4695 f->fs.mask.lip[i] = ~0;
4696 }
4697 if (adap->params.tp.vlan_pri_map & F_PORT) {
4698 f->fs.val.iport = port;
4699 f->fs.mask.iport = mask;
4700 }
4701 }
4702
4703 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4704 f->fs.val.proto = IPPROTO_TCP;
4705 f->fs.mask.proto = ~0;
4706 }
4707
4708 f->fs.dirsteer = 1;
4709 f->fs.iq = queue;
4710 /* Mark filter as locked */
4711 f->locked = 1;
4712 f->fs.rpttid = 1;
4713
4714 ret = set_filter_wr(adap, stid);
4715 if (ret) {
4716 clear_filter(adap, f);
4717 return ret;
4718 }
4719
4720 return 0;
4721 }
4722 EXPORT_SYMBOL(cxgb4_create_server_filter);
4723
4724 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4725 unsigned int queue, bool ipv6)
4726 {
4727 int ret;
4728 struct filter_entry *f;
4729 struct adapter *adap;
4730
4731 adap = netdev2adap(dev);
4732
4733 /* Adjust stid to correct filter index */
4734 stid -= adap->tids.sftid_base;
4735 stid += adap->tids.nftids;
4736
4737 f = &adap->tids.ftid_tab[stid];
4738 /* Unlock the filter */
4739 f->locked = 0;
4740
4741 ret = delete_filter(adap, stid);
4742 if (ret)
4743 return ret;
4744
4745 return 0;
4746 }
4747 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4748
4749 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4750 struct rtnl_link_stats64 *ns)
4751 {
4752 struct port_stats stats;
4753 struct port_info *p = netdev_priv(dev);
4754 struct adapter *adapter = p->adapter;
4755
4756 /* Block retrieving statistics during EEH error
4757 * recovery. Otherwise, the recovery might fail
4758 * and the PCI device will be removed permanently
4759 */
4760 spin_lock(&adapter->stats_lock);
4761 if (!netif_device_present(dev)) {
4762 spin_unlock(&adapter->stats_lock);
4763 return ns;
4764 }
4765 t4_get_port_stats(adapter, p->tx_chan, &stats);
4766 spin_unlock(&adapter->stats_lock);
4767
4768 ns->tx_bytes = stats.tx_octets;
4769 ns->tx_packets = stats.tx_frames;
4770 ns->rx_bytes = stats.rx_octets;
4771 ns->rx_packets = stats.rx_frames;
4772 ns->multicast = stats.rx_mcast_frames;
4773
4774 /* detailed rx_errors */
4775 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4776 stats.rx_runt;
4777 ns->rx_over_errors = 0;
4778 ns->rx_crc_errors = stats.rx_fcs_err;
4779 ns->rx_frame_errors = stats.rx_symbol_err;
4780 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4781 stats.rx_ovflow2 + stats.rx_ovflow3 +
4782 stats.rx_trunc0 + stats.rx_trunc1 +
4783 stats.rx_trunc2 + stats.rx_trunc3;
4784 ns->rx_missed_errors = 0;
4785
4786 /* detailed tx_errors */
4787 ns->tx_aborted_errors = 0;
4788 ns->tx_carrier_errors = 0;
4789 ns->tx_fifo_errors = 0;
4790 ns->tx_heartbeat_errors = 0;
4791 ns->tx_window_errors = 0;
4792
4793 ns->tx_errors = stats.tx_error_frames;
4794 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4795 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4796 return ns;
4797 }
4798
4799 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4800 {
4801 unsigned int mbox;
4802 int ret = 0, prtad, devad;
4803 struct port_info *pi = netdev_priv(dev);
4804 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4805
4806 switch (cmd) {
4807 case SIOCGMIIPHY:
4808 if (pi->mdio_addr < 0)
4809 return -EOPNOTSUPP;
4810 data->phy_id = pi->mdio_addr;
4811 break;
4812 case SIOCGMIIREG:
4813 case SIOCSMIIREG:
4814 if (mdio_phy_id_is_c45(data->phy_id)) {
4815 prtad = mdio_phy_id_prtad(data->phy_id);
4816 devad = mdio_phy_id_devad(data->phy_id);
4817 } else if (data->phy_id < 32) {
4818 prtad = data->phy_id;
4819 devad = 0;
4820 data->reg_num &= 0x1f;
4821 } else
4822 return -EINVAL;
4823
4824 mbox = pi->adapter->fn;
4825 if (cmd == SIOCGMIIREG)
4826 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4827 data->reg_num, &data->val_out);
4828 else
4829 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4830 data->reg_num, data->val_in);
4831 break;
4832 default:
4833 return -EOPNOTSUPP;
4834 }
4835 return ret;
4836 }
4837
4838 static void cxgb_set_rxmode(struct net_device *dev)
4839 {
4840 /* unfortunately we can't return errors to the stack */
4841 set_rxmode(dev, -1, false);
4842 }
4843
4844 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4845 {
4846 int ret;
4847 struct port_info *pi = netdev_priv(dev);
4848
4849 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4850 return -EINVAL;
4851 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4852 -1, -1, -1, true);
4853 if (!ret)
4854 dev->mtu = new_mtu;
4855 return ret;
4856 }
4857
4858 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4859 {
4860 int ret;
4861 struct sockaddr *addr = p;
4862 struct port_info *pi = netdev_priv(dev);
4863
4864 if (!is_valid_ether_addr(addr->sa_data))
4865 return -EADDRNOTAVAIL;
4866
4867 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4868 pi->xact_addr_filt, addr->sa_data, true, true);
4869 if (ret < 0)
4870 return ret;
4871
4872 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4873 pi->xact_addr_filt = ret;
4874 return 0;
4875 }
4876
4877 #ifdef CONFIG_NET_POLL_CONTROLLER
4878 static void cxgb_netpoll(struct net_device *dev)
4879 {
4880 struct port_info *pi = netdev_priv(dev);
4881 struct adapter *adap = pi->adapter;
4882
4883 if (adap->flags & USING_MSIX) {
4884 int i;
4885 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4886
4887 for (i = pi->nqsets; i; i--, rx++)
4888 t4_sge_intr_msix(0, &rx->rspq);
4889 } else
4890 t4_intr_handler(adap)(0, adap);
4891 }
4892 #endif
4893
4894 static const struct net_device_ops cxgb4_netdev_ops = {
4895 .ndo_open = cxgb_open,
4896 .ndo_stop = cxgb_close,
4897 .ndo_start_xmit = t4_eth_xmit,
4898 .ndo_select_queue = cxgb_select_queue,
4899 .ndo_get_stats64 = cxgb_get_stats,
4900 .ndo_set_rx_mode = cxgb_set_rxmode,
4901 .ndo_set_mac_address = cxgb_set_mac_addr,
4902 .ndo_set_features = cxgb_set_features,
4903 .ndo_validate_addr = eth_validate_addr,
4904 .ndo_do_ioctl = cxgb_ioctl,
4905 .ndo_change_mtu = cxgb_change_mtu,
4906 #ifdef CONFIG_NET_POLL_CONTROLLER
4907 .ndo_poll_controller = cxgb_netpoll,
4908 #endif
4909 };
4910
4911 void t4_fatal_err(struct adapter *adap)
4912 {
4913 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4914 t4_intr_disable(adap);
4915 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4916 }
4917
4918 /* Return the specified PCI-E Configuration Space register from our Physical
4919 * Function. We try first via a Firmware LDST Command since we prefer to let
4920 * the firmware own all of these registers, but if that fails we go for it
4921 * directly ourselves.
4922 */
4923 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4924 {
4925 struct fw_ldst_cmd ldst_cmd;
4926 u32 val;
4927 int ret;
4928
4929 /* Construct and send the Firmware LDST Command to retrieve the
4930 * specified PCI-E Configuration Space register.
4931 */
4932 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4933 ldst_cmd.op_to_addrspace =
4934 htonl(FW_CMD_OP(FW_LDST_CMD) |
4935 FW_CMD_REQUEST |
4936 FW_CMD_READ |
4937 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4938 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4939 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4940 ldst_cmd.u.pcie.ctrl_to_fn =
4941 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4942 ldst_cmd.u.pcie.r = reg;
4943 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4944 &ldst_cmd);
4945
4946 /* If the LDST Command succeeded, extract the returned register
4947 * value. Otherwise read it directly ourselves.
4948 */
4949 if (ret == 0)
4950 val = ntohl(ldst_cmd.u.pcie.data[0]);
4951 else
4952 t4_hw_pci_read_cfg4(adap, reg, &val);
4953
4954 return val;
4955 }
4956
4957 static void setup_memwin(struct adapter *adap)
4958 {
4959 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4960
4961 if (is_t4(adap->params.chip)) {
4962 u32 bar0;
4963
4964 /* Truncation intentional: we only read the bottom 32-bits of
4965 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4966 * mechanism to read BAR0 instead of using
4967 * pci_resource_start() because we could be operating from
4968 * within a Virtual Machine which is trapping our accesses to
4969 * our Configuration Space and we need to set up the PCI-E
4970 * Memory Window decoders with the actual addresses which will
4971 * be coming across the PCI-E link.
4972 */
4973 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4974 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4975 adap->t4_bar0 = bar0;
4976
4977 mem_win0_base = bar0 + MEMWIN0_BASE;
4978 mem_win1_base = bar0 + MEMWIN1_BASE;
4979 mem_win2_base = bar0 + MEMWIN2_BASE;
4980 mem_win2_aperture = MEMWIN2_APERTURE;
4981 } else {
4982 /* For T5, only relative offset inside the PCIe BAR is passed */
4983 mem_win0_base = MEMWIN0_BASE;
4984 mem_win1_base = MEMWIN1_BASE;
4985 mem_win2_base = MEMWIN2_BASE_T5;
4986 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4987 }
4988 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4989 mem_win0_base | BIR(0) |
4990 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4991 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4992 mem_win1_base | BIR(0) |
4993 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4994 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4995 mem_win2_base | BIR(0) |
4996 WINDOW(ilog2(mem_win2_aperture) - 10));
4997 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
4998 }
4999
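/* If on-chip queue memory is configured, point PCI-E memory window 3 at the
 * on-chip queue region so it can be accessed through that window.
 */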
5000 static void setup_memwin_rdma(struct adapter *adap)
5001 {
5002 if (adap->vres.ocq.size) {
5003 u32 start;
5004 unsigned int sz_kb;
5005
5006 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
5007 start &= PCI_BASE_ADDRESS_MEM_MASK;
5008 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
5009 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
5010 t4_write_reg(adap,
5011 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
5012 start | BIR(1) | WINDOW(ilog2(sz_kb)));
5013 t4_write_reg(adap,
5014 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
5015 adap->vres.ocq.start);
5016 t4_read_reg(adap,
5017 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
5018 }
5019 }
5020
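/* Perform basic adapter initialization via the firmware: retrieve and select
 * device capabilities, configure global RSS, provision this PF's resources
 * and tweak a few TP/ULP settings before issuing the early init command.
 */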
5021 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
5022 {
5023 u32 v;
5024 int ret;
5025
5026 /* get device capabilities */
5027 memset(c, 0, sizeof(*c));
5028 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5029 FW_CMD_REQUEST | FW_CMD_READ);
5030 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
5031 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
5032 if (ret < 0)
5033 return ret;
5034
5035 /* select capabilities we'll be using */
5036 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5037 if (!vf_acls)
5038 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5039 else
5040 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5041 } else if (vf_acls) {
5042 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
5043 return ret;
5044 }
5045 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5046 FW_CMD_REQUEST | FW_CMD_WRITE);
5047 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
5048 if (ret < 0)
5049 return ret;
5050
5051 ret = t4_config_glbl_rss(adap, adap->fn,
5052 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5053 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5054 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
5055 if (ret < 0)
5056 return ret;
5057
5058 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
5059 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
5060 if (ret < 0)
5061 return ret;
5062
5063 t4_sge_init(adap);
5064
5065 /* tweak some settings */
5066 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5067 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5068 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5069 v = t4_read_reg(adap, TP_PIO_DATA);
5070 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
5071
5072 /* first 4 Tx modulation queues point to consecutive Tx channels */
5073 adap->params.tp.tx_modq_map = 0xE4;
5074 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5075 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
5076
5077 /* associate each Tx modulation queue with consecutive Tx channels */
5078 v = 0x84218421;
5079 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5080 &v, 1, A_TP_TX_SCHED_HDR);
5081 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5082 &v, 1, A_TP_TX_SCHED_FIFO);
5083 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5084 &v, 1, A_TP_TX_SCHED_PCMD);
5085
5086 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
5087 if (is_offload(adap)) {
5088 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
5089 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5090 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5091 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5092 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5093 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
5094 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5095 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5096 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
5097 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
5098 }
5099
5100 /* get basic stuff going */
5101 return t4_early_init(adap, adap->fn);
5102 }
5103
5104 /*
5105 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
5106 */
5107 #define MAX_ATIDS 8192U
5108
5109 /*
5110 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5111 *
5112 * If the firmware we're dealing with has Configuration File support, then
5113 * we use that to perform all configuration.
5114 */
5115
5116 /*
5117 * Tweak configuration based on module parameters, etc. Most of these have
5118 * defaults assigned to them by Firmware Configuration Files (if we're using
5119 * them) but need to be explicitly set if we're using hard-coded
5120 * initialization. But even in the case of using Firmware Configuration
5121 * Files, we'd like to expose the ability to change these via module
5122 * parameters so these are essentially common tweaks/settings for
5123 * Configuration Files and hard-coded initialization ...
5124 */
5125 static int adap_init0_tweaks(struct adapter *adapter)
5126 {
5127 /*
5128 * Fix up various Host-Dependent Parameters like Page Size, Cache
5129 * Line Size, etc. The firmware default is for a 4KB Page Size and
5130 * 64B Cache Line Size ...
5131 */
5132 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
5133
5134 /*
5135 * Process module parameters which affect early initialization.
5136 */
5137 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
5138 dev_err(&adapter->pdev->dev,
5139 "Ignoring illegal rx_dma_offset=%d, using 2\n",
5140 rx_dma_offset);
5141 rx_dma_offset = 2;
5142 }
5143 t4_set_reg_field(adapter, SGE_CONTROL,
5144 PKTSHIFT_MASK,
5145 PKTSHIFT(rx_dma_offset));
5146
5147 /*
5148 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
5149 * adds the pseudo header itself.
5150 */
5151 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
5152 CSUM_HAS_PSEUDO_HDR, 0);
5153
5154 return 0;
5155 }
5156
5157 /*
5158 * Attempt to initialize the adapter via a Firmware Configuration File.
5159 */
5160 static int adap_init0_config(struct adapter *adapter, int reset)
5161 {
5162 struct fw_caps_config_cmd caps_cmd;
5163 const struct firmware *cf;
5164 unsigned long mtype = 0, maddr = 0;
5165 u32 finiver, finicsum, cfcsum;
5166 int ret;
5167 int config_issued = 0;
5168 char *fw_config_file, fw_config_file_path[256];
5169 char *config_name = NULL;
5170
5171 /*
5172 * Reset device if necessary.
5173 */
5174 if (reset) {
5175 ret = t4_fw_reset(adapter, adapter->mbox,
5176 PIORSTMODE | PIORST);
5177 if (ret < 0)
5178 goto bye;
5179 }
5180
5181 /*
5182 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
5183 * then use that. Otherwise, use the configuration file stored
5184 * in the adapter flash ...
5185 */
5186 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
5187 case CHELSIO_T4:
5188 fw_config_file = FW4_CFNAME;
5189 break;
5190 case CHELSIO_T5:
5191 fw_config_file = FW5_CFNAME;
5192 break;
5193 default:
5194 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
5195 adapter->pdev->device);
5196 ret = -EINVAL;
5197 goto bye;
5198 }
5199
5200 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
5201 if (ret < 0) {
5202 config_name = "On FLASH";
5203 mtype = FW_MEMTYPE_CF_FLASH;
5204 maddr = t4_flash_cfg_addr(adapter);
5205 } else {
5206 u32 params[7], val[7];
5207
5208 sprintf(fw_config_file_path,
5209 "/lib/firmware/%s", fw_config_file);
5210 config_name = fw_config_file_path;
5211
5212 if (cf->size >= FLASH_CFG_MAX_SIZE)
5213 ret = -ENOMEM;
5214 else {
5215 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5216 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5217 ret = t4_query_params(adapter, adapter->mbox,
5218 adapter->fn, 0, 1, params, val);
5219 if (ret == 0) {
5220 /*
5221 * For t4_memory_rw() below addresses and
5222 * sizes have to be in terms of multiples of 4
5223 * bytes. So, if the Configuration File isn't
5224 * a multiple of 4 bytes in length we'll have
5225 * to write that out separately since we can't
5226 * guarantee that the bytes following the
5227 * residual byte in the buffer returned by
5228 * request_firmware() are zeroed out ...
5229 */
5230 size_t resid = cf->size & 0x3;
5231 size_t size = cf->size & ~0x3;
5232 __be32 *data = (__be32 *)cf->data;
5233
5234 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
5235 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
5236
5237 spin_lock(&adapter->win0_lock);
5238 ret = t4_memory_rw(adapter, 0, mtype, maddr,
5239 size, data, T4_MEMORY_WRITE);
5240 if (ret == 0 && resid != 0) {
5241 union {
5242 __be32 word;
5243 char buf[4];
5244 } last;
5245 int i;
5246
5247 last.word = data[size >> 2];
5248 for (i = resid; i < 4; i++)
5249 last.buf[i] = 0;
5250 ret = t4_memory_rw(adapter, 0, mtype,
5251 maddr + size,
5252 4, &last.word,
5253 T4_MEMORY_WRITE);
5254 }
5255 spin_unlock(&adapter->win0_lock);
5256 }
5257 }
5258
5259 release_firmware(cf);
5260 if (ret)
5261 goto bye;
5262 }
5263
5264 /*
5265 * Issue a Capability Configuration command to the firmware to get it
5266 * to parse the Configuration File. We don't use t4_fw_config_file()
5267 * because we want the ability to modify various features after we've
5268 * processed the configuration file ...
5269 */
5270 memset(&caps_cmd, 0, sizeof(caps_cmd));
5271 caps_cmd.op_to_write =
5272 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5273 FW_CMD_REQUEST |
5274 FW_CMD_READ);
5275 caps_cmd.cfvalid_to_len16 =
5276 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
5277 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
5278 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
5279 FW_LEN16(caps_cmd));
5280 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5281 &caps_cmd);
5282
5283 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5284 * Configuration File in FLASH), our last gasp effort is to use the
5285 * Firmware Configuration File which is embedded in the firmware. A
5286 * very few early versions of the firmware didn't have one embedded
5287 * but we can ignore those.
5288 */
5289 if (ret == -ENOENT) {
5290 memset(&caps_cmd, 0, sizeof(caps_cmd));
5291 caps_cmd.op_to_write =
5292 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5293 FW_CMD_REQUEST |
5294 FW_CMD_READ);
5295 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5296 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5297 sizeof(caps_cmd), &caps_cmd);
5298 config_name = "Firmware Default";
5299 }
5300
5301 config_issued = 1;
5302 if (ret < 0)
5303 goto bye;
5304
5305 finiver = ntohl(caps_cmd.finiver);
5306 finicsum = ntohl(caps_cmd.finicsum);
5307 cfcsum = ntohl(caps_cmd.cfcsum);
5308 if (finicsum != cfcsum)
5309 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5310 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5311 finicsum, cfcsum);
5312
5313 /*
5314 * And now tell the firmware to use the configuration we just loaded.
5315 */
5316 caps_cmd.op_to_write =
5317 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5318 FW_CMD_REQUEST |
5319 FW_CMD_WRITE);
5320 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5321 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5322 NULL);
5323 if (ret < 0)
5324 goto bye;
5325
5326 /*
5327 * Tweak configuration based on system architecture, module
5328 * parameters, etc.
5329 */
5330 ret = adap_init0_tweaks(adapter);
5331 if (ret < 0)
5332 goto bye;
5333
5334 /*
5335 * And finally tell the firmware to initialize itself using the
5336 * parameters from the Configuration File.
5337 */
5338 ret = t4_fw_initialize(adapter, adapter->mbox);
5339 if (ret < 0)
5340 goto bye;
5341
5342 /*
5343 * Return successfully and note that we're operating with parameters
5344 * not supplied by the driver, rather than from hard-wired
5345 * initialization constants burried in the driver.
5346 */
5347 adapter->flags |= USING_SOFT_PARAMS;
5348 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5349 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5350 config_name, finiver, cfcsum);
5351 return 0;
5352
5353 /*
5354 * Something bad happened. Return the error ... (If the "error"
5355 * is that there's no Configuration File on the adapter we don't
5356 * want to issue a warning since this is fairly common.)
5357 */
5358 bye:
5359 if (config_issued && ret != -ENOENT)
5360 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5361 config_name, -ret);
5362 return ret;
5363 }
5364
5365 /*
5366 * Attempt to initialize the adapter via hard-coded, driver supplied
5367 * parameters ...
5368 */
5369 static int adap_init0_no_config(struct adapter *adapter, int reset)
5370 {
5371 struct sge *s = &adapter->sge;
5372 struct fw_caps_config_cmd caps_cmd;
5373 u32 v;
5374 int i, ret;
5375
5376 /*
5377 * Reset device if necessary
5378 */
5379 if (reset) {
5380 ret = t4_fw_reset(adapter, adapter->mbox,
5381 PIORSTMODE | PIORST);
5382 if (ret < 0)
5383 goto bye;
5384 }
5385
5386 /*
5387 * Get device capabilities and select which we'll be using.
5388 */
5389 memset(&caps_cmd, 0, sizeof(caps_cmd));
5390 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5391 FW_CMD_REQUEST | FW_CMD_READ);
5392 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5393 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5394 &caps_cmd);
5395 if (ret < 0)
5396 goto bye;
5397
5398 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5399 if (!vf_acls)
5400 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5401 else
5402 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5403 } else if (vf_acls) {
5404 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5405 goto bye;
5406 }
5407 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5408 FW_CMD_REQUEST | FW_CMD_WRITE);
5409 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5410 NULL);
5411 if (ret < 0)
5412 goto bye;
5413
5414 /*
5415 * Tweak configuration based on system architecture, module
5416 * parameters, etc.
5417 */
5418 ret = adap_init0_tweaks(adapter);
5419 if (ret < 0)
5420 goto bye;
5421
5422 /*
5423 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5424 * mode which maps each Virtual Interface to its own section of
5425 * the RSS Table and we turn on all map and hash enables ...
5426 */
5427 adapter->flags |= RSS_TNLALLLOOKUP;
5428 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5429 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5430 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5431 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5432 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5433 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5434 if (ret < 0)
5435 goto bye;
5436
5437 /*
5438 * Set up our own fundamental resource provisioning ...
5439 */
5440 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5441 PFRES_NEQ, PFRES_NETHCTRL,
5442 PFRES_NIQFLINT, PFRES_NIQ,
5443 PFRES_TC, PFRES_NVI,
5444 FW_PFVF_CMD_CMASK_MASK,
5445 pfvfres_pmask(adapter, adapter->fn, 0),
5446 PFRES_NEXACTF,
5447 PFRES_R_CAPS, PFRES_WX_CAPS);
5448 if (ret < 0)
5449 goto bye;
5450
5451 /*
5452 * Perform low level SGE initialization. We need to do this before we
5453 * send the firmware the INITIALIZE command because that will cause
5454 * any other PF Drivers which are waiting for the Master
5455 * Initialization to proceed forward.
5456 */
5457 for (i = 0; i < SGE_NTIMERS - 1; i++)
5458 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5459 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5460 s->counter_val[0] = 1;
5461 for (i = 1; i < SGE_NCOUNTERS; i++)
5462 s->counter_val[i] = min(intr_cnt[i - 1],
5463 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5464 t4_sge_init(adapter);
5465
5466 #ifdef CONFIG_PCI_IOV
5467 /*
5468 * Provision resource limits for Virtual Functions. We currently
5469 * grant them all the same static resource limits except for the Port
5470 * Access Rights Mask which we're assigning based on the PF. All of
5471 * the static provisioning stuff for both the PF and VF really needs
5472 * to be managed in a persistent manner for each device which the
5473 * firmware controls.
5474 */
5475 {
5476 int pf, vf;
5477
5478 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5479 if (num_vf[pf] <= 0)
5480 continue;
5481
5482 /* VF numbering starts at 1! */
5483 for (vf = 1; vf <= num_vf[pf]; vf++) {
5484 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5485 pf, vf,
5486 VFRES_NEQ, VFRES_NETHCTRL,
5487 VFRES_NIQFLINT, VFRES_NIQ,
5488 VFRES_TC, VFRES_NVI,
5489 FW_PFVF_CMD_CMASK_MASK,
5490 pfvfres_pmask(
5491 adapter, pf, vf),
5492 VFRES_NEXACTF,
5493 VFRES_R_CAPS, VFRES_WX_CAPS);
5494 if (ret < 0)
5495 dev_warn(adapter->pdev_dev,
5496 "failed to "\
5497 "provision pf/vf=%d/%d; "
5498 "err=%d\n", pf, vf, ret);
5499 }
5500 }
5501 }
5502 #endif
5503
5504 /*
5505 * Set up the default filter mode. Later we'll want to implement this
5506 * via a firmware command, etc. ... This needs to be done before the
5507 * firmware initialization command ... If the selected set of fields
5508 * isn't equal to the default value, we'll need to make sure that the
5509 * field selections will fit in the 36-bit budget.
5510 */
5511 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5512 int j, bits = 0;
5513
5514 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5515 switch (tp_vlan_pri_map & (1 << j)) {
5516 case 0:
5517 /* compressed filter field not enabled */
5518 break;
5519 case FCOE_MASK:
5520 bits += 1;
5521 break;
5522 case PORT_MASK:
5523 bits += 3;
5524 break;
5525 case VNIC_ID_MASK:
5526 bits += 17;
5527 break;
5528 case VLAN_MASK:
5529 bits += 17;
5530 break;
5531 case TOS_MASK:
5532 bits += 8;
5533 break;
5534 case PROTOCOL_MASK:
5535 bits += 8;
5536 break;
5537 case ETHERTYPE_MASK:
5538 bits += 16;
5539 break;
5540 case MACMATCH_MASK:
5541 bits += 9;
5542 break;
5543 case MPSHITTYPE_MASK:
5544 bits += 3;
5545 break;
5546 case FRAGMENTATION_MASK:
5547 bits += 1;
5548 break;
5549 }
5550
5551 if (bits > 36) {
5552 dev_err(adapter->pdev_dev,
5553 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5554 " using %#x\n", tp_vlan_pri_map, bits,
5555 TP_VLAN_PRI_MAP_DEFAULT);
5556 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5557 }
5558 }
5559 v = tp_vlan_pri_map;
5560 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5561 &v, 1, TP_VLAN_PRI_MAP);
5562
5563 /*
5564 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
5565 * order to support any of the compressed filter fields above. Newer
5566 * versions of the firmware do this automatically but it doesn't hurt
5567 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5568 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5569 * since the firmware automatically turns this on and off when we have
5570 * a non-zero number of filters active (since it does have a
5571 * performance impact).
5572 */
5573 if (tp_vlan_pri_map)
5574 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5575 FIVETUPLELOOKUP_MASK,
5576 FIVETUPLELOOKUP_MASK);
5577
5578 /*
5579 * Tweak some settings.
5580 */
5581 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5582 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5583 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5584 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5585
5586 /*
5587 * Get basic stuff going by issuing the Firmware Initialize command.
5588 * Note that this _must_ be after all PFVF commands ...
5589 */
5590 ret = t4_fw_initialize(adapter, adapter->mbox);
5591 if (ret < 0)
5592 goto bye;
5593
5594 /*
5595 * Return successfully!
5596 */
5597 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5598 "driver parameters\n");
5599 return 0;
5600
5601 /*
5602 * Something bad happened. Return the error ...
5603 */
5604 bye:
5605 return ret;
5606 }
5607
5608 static struct fw_info fw_info_array[] = {
5609 {
5610 .chip = CHELSIO_T4,
5611 .fs_name = FW4_CFNAME,
5612 .fw_mod_name = FW4_FNAME,
5613 .fw_hdr = {
5614 .chip = FW_HDR_CHIP_T4,
5615 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5616 .intfver_nic = FW_INTFVER(T4, NIC),
5617 .intfver_vnic = FW_INTFVER(T4, VNIC),
5618 .intfver_ri = FW_INTFVER(T4, RI),
5619 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5620 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5621 },
5622 }, {
5623 .chip = CHELSIO_T5,
5624 .fs_name = FW5_CFNAME,
5625 .fw_mod_name = FW5_FNAME,
5626 .fw_hdr = {
5627 .chip = FW_HDR_CHIP_T5,
5628 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5629 .intfver_nic = FW_INTFVER(T5, NIC),
5630 .intfver_vnic = FW_INTFVER(T5, VNIC),
5631 .intfver_ri = FW_INTFVER(T5, RI),
5632 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5633 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5634 },
5635 }
5636 };
5637
5638 static struct fw_info *find_fw_info(int chip)
5639 {
5640 int i;
5641
5642 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5643 if (fw_info_array[i].chip == chip)
5644 return &fw_info_array[i];
5645 }
5646 return NULL;
5647 }
5648
5649 /*
5650 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5651 */
5652 static int adap_init0(struct adapter *adap)
5653 {
5654 int ret;
5655 u32 v, port_vec;
5656 enum dev_state state;
5657 u32 params[7], val[7];
5658 struct fw_caps_config_cmd caps_cmd;
5659 int reset = 1;
5660
5661 /*
5662 * Contact FW, advertising Master capability (and potentially forcing
5663 * ourselves as the Master PF if our module parameter force_init is
5664 * set).
5665 */
5666 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5667 force_init ? MASTER_MUST : MASTER_MAY,
5668 &state);
5669 if (ret < 0) {
5670 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5671 ret);
5672 return ret;
5673 }
5674 if (ret == adap->mbox)
5675 adap->flags |= MASTER_PF;
5676 if (force_init && state == DEV_STATE_INIT)
5677 state = DEV_STATE_UNINIT;
5678
5679 /*
5680 * If we're the Master PF Driver and the device is uninitialized,
5681 * then let's consider upgrading the firmware ... (We always want
5682 * to check the firmware version number in order to A. get it for
5683 * later reporting and B. to warn if the currently loaded firmware
5684 * is excessively mismatched relative to the driver.)
5685 */
5686 t4_get_fw_version(adap, &adap->params.fw_vers);
5687 t4_get_tp_version(adap, &adap->params.tp_vers);
5688 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5689 struct fw_info *fw_info;
5690 struct fw_hdr *card_fw;
5691 const struct firmware *fw;
5692 const u8 *fw_data = NULL;
5693 unsigned int fw_size = 0;
5694
5695 /* This is the firmware whose headers the driver was compiled
5696 * against
5697 */
5698 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5699 if (fw_info == NULL) {
5700 dev_err(adap->pdev_dev,
5701 "unable to get firmware info for chip %d.\n",
5702 CHELSIO_CHIP_VERSION(adap->params.chip));
5703 return -EINVAL;
5704 }
5705
5706 /* allocate memory to read the header of the firmware on the
5707 * card
5708 */
5709 card_fw = t4_alloc_mem(sizeof(*card_fw));
5710
5711 /* Get FW from /lib/firmware/ */
5712 ret = request_firmware(&fw, fw_info->fw_mod_name,
5713 adap->pdev_dev);
5714 if (ret < 0) {
5715 dev_err(adap->pdev_dev,
5716 "unable to load firmware image %s, error %d\n",
5717 fw_info->fw_mod_name, ret);
5718 } else {
5719 fw_data = fw->data;
5720 fw_size = fw->size;
5721 }
5722
5723 /* upgrade FW logic */
5724 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5725 state, &reset);
5726
5727 /* Cleaning up */
5728 if (fw != NULL)
5729 release_firmware(fw);
5730 t4_free_mem(card_fw);
5731
5732 if (ret < 0)
5733 goto bye;
5734 }
5735
5736 /*
5737 * Grab VPD parameters. This should be done after we establish a
5738 * connection to the firmware since some of the VPD parameters
5739 * (notably the Core Clock frequency) are retrieved via requests to
5740 * the firmware. On the other hand, we need these fairly early on
5741 * so we do this right after getting ahold of the firmware.
5742 */
5743 ret = get_vpd_params(adap, &adap->params.vpd);
5744 if (ret < 0)
5745 goto bye;
5746
5747 /*
5748 * Find out what ports are available to us. Note that we need to do
5749 * this before calling adap_init0_no_config() since it needs nports
5750 * and portvec ...
5751 */
5752 v =
5753 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5754 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5755 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5756 if (ret < 0)
5757 goto bye;
5758
5759 adap->params.nports = hweight32(port_vec);
5760 adap->params.portvec = port_vec;
5761
5762 /*
5763 * If the firmware is initialized already (and we're not forcing a
5764 * master initialization), note that we're living with existing
5765 * adapter parameters. Otherwise, it's time to try initializing the
5766 * adapter ...
5767 */
5768 if (state == DEV_STATE_INIT) {
5769 dev_info(adap->pdev_dev, "Coming up as %s: "
5770 "Adapter already initialized\n",
5771 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5772 adap->flags |= USING_SOFT_PARAMS;
5773 } else {
5774 dev_info(adap->pdev_dev, "Coming up as MASTER: "
5775 "Initializing adapter\n");
5776
5777 /*
5778 * If the firmware doesn't support Configuration
5779 * Files, warn the user.
5780 */
5781 if (ret < 0)
5782 dev_warn(adap->pdev_dev, "Firmware doesn't support "
5783 "configuration file.\n");
5784 if (force_old_init)
5785 ret = adap_init0_no_config(adap, reset);
5786 else {
5787 /*
5788 * Find out whether we're dealing with a version of
5789 * the firmware which has configuration file support.
5790 */
5791 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5792 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5793 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5794 params, val);
5795
5796 /*
5797 * If the firmware doesn't support Configuration
5798 * Files, use the old Driver-based, hard-wired
5799 * initialization. Otherwise, try using the
5800 * Configuration File support and fall back to the
5801 * Driver-based initialization if there's no
5802 * Configuration File found.
5803 */
5804 if (ret < 0)
5805 ret = adap_init0_no_config(adap, reset);
5806 else {
5807 /*
5808 * The firmware provides us with a memory
5809 * buffer where we can load a Configuration
5810 * File from the host if we want to override
5811 * the Configuration File in flash.
5812 */
5813
5814 ret = adap_init0_config(adap, reset);
5815 if (ret == -ENOENT) {
5816 dev_info(adap->pdev_dev,
5817 "No Configuration File present "
5818 "on adapter. Using hard-wired "
5819 "configuration parameters.\n");
5820 ret = adap_init0_no_config(adap, reset);
5821 }
5822 }
5823 }
5824 if (ret < 0) {
5825 dev_err(adap->pdev_dev,
5826 "could not initialize adapter, error %d\n",
5827 -ret);
5828 goto bye;
5829 }
5830 }
5831
5832 /*
5833 * If we're living with non-hard-coded parameters (either from a
5834 * Firmware Configuration File or values programmed by a different PF
5835 * Driver), give the SGE code a chance to pull in anything that it
5836 * needs ... Note that this must be called after we retrieve our VPD
5837 * parameters in order to know how to convert core ticks to seconds.
5838 */
5839 if (adap->flags & USING_SOFT_PARAMS) {
5840 ret = t4_sge_init(adap);
5841 if (ret < 0)
5842 goto bye;
5843 }
5844
5845 if (is_bypass_device(adap->pdev->device))
5846 adap->params.bypass = 1;
5847
5848 /*
5849 * Grab some of our basic fundamental operating parameters.
5850 */
5851 #define FW_PARAM_DEV(param) \
5852 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5853 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5854
5855 #define FW_PARAM_PFVF(param) \
5856 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5857 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
5858 FW_PARAMS_PARAM_Y(0) | \
5859 FW_PARAMS_PARAM_Z(0))
5860
5861 params[0] = FW_PARAM_PFVF(EQ_START);
5862 params[1] = FW_PARAM_PFVF(L2T_START);
5863 params[2] = FW_PARAM_PFVF(L2T_END);
5864 params[3] = FW_PARAM_PFVF(FILTER_START);
5865 params[4] = FW_PARAM_PFVF(FILTER_END);
5866 params[5] = FW_PARAM_PFVF(IQFLINT_START);
5867 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5868 if (ret < 0)
5869 goto bye;
5870 adap->sge.egr_start = val[0];
5871 adap->l2t_start = val[1];
5872 adap->l2t_end = val[2];
5873 adap->tids.ftid_base = val[3];
5874 adap->tids.nftids = val[4] - val[3] + 1;
5875 adap->sge.ingr_start = val[5];
5876
5877 /* query params related to active filter region */
5878 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5879 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5880 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5881 /* If the active filter region is non-empty, enable establishing
5882 * offload connections through firmware work requests
5883 */
5884 if ((val[0] != val[1]) && (ret >= 0)) {
5885 adap->flags |= FW_OFLD_CONN;
5886 adap->tids.aftid_base = val[0];
5887 adap->tids.aftid_end = val[1];
5888 }
5889
5890 /* If we're running on newer firmware, let it know that we're
5891 * prepared to deal with encapsulated CPL messages. Older
5892 * firmware won't understand this and we'll just get
5893 * unencapsulated messages ...
5894 */
5895 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5896 val[0] = 1;
5897 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5898
5899 /*
5900 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5901 * capability. Earlier versions of the firmware didn't have the
5902 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5903 * permission to use ULPTX MEMWRITE DSGL.
5904 */
5905 if (is_t4(adap->params.chip)) {
5906 adap->params.ulptx_memwrite_dsgl = false;
5907 } else {
5908 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5909 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5910 1, params, val);
5911 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5912 }
5913
5914 /*
5915 * Get device capabilities so we can determine what resources we need
5916 * to manage.
5917 */
5918 memset(&caps_cmd, 0, sizeof(caps_cmd));
5919 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5920 FW_CMD_REQUEST | FW_CMD_READ);
5921 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5922 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5923 &caps_cmd);
5924 if (ret < 0)
5925 goto bye;
5926
5927 if (caps_cmd.ofldcaps) {
5928 /* query offload-related parameters */
5929 params[0] = FW_PARAM_DEV(NTID);
5930 params[1] = FW_PARAM_PFVF(SERVER_START);
5931 params[2] = FW_PARAM_PFVF(SERVER_END);
5932 params[3] = FW_PARAM_PFVF(TDDP_START);
5933 params[4] = FW_PARAM_PFVF(TDDP_END);
5934 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5935 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5936 params, val);
5937 if (ret < 0)
5938 goto bye;
5939 adap->tids.ntids = val[0];
5940 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5941 adap->tids.stid_base = val[1];
5942 adap->tids.nstids = val[2] - val[1] + 1;
5943 /*
5944 * Set up the server filter region. Divide the available filter
5945 * region into two parts: regular filters get 1/3rd and server
5946 * filters get the remaining 2/3rd. This split is only done when
5947 * the firmware offload-connection workaround path is enabled.
5948 * 1. Regular filters.
5949 * 2. Server filters: special filters used to redirect SYN packets
5950 * to the offload queue.
5951 */
5952 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5953 adap->tids.sftid_base = adap->tids.ftid_base +
5954 DIV_ROUND_UP(adap->tids.nftids, 3);
5955 adap->tids.nsftids = adap->tids.nftids -
5956 DIV_ROUND_UP(adap->tids.nftids, 3);
5957 adap->tids.nftids = adap->tids.sftid_base -
5958 adap->tids.ftid_base;
5959 }
5960 adap->vres.ddp.start = val[3];
5961 adap->vres.ddp.size = val[4] - val[3] + 1;
5962 adap->params.ofldq_wr_cred = val[5];
5963
5964 adap->params.offload = 1;
5965 }
5966 if (caps_cmd.rdmacaps) {
5967 params[0] = FW_PARAM_PFVF(STAG_START);
5968 params[1] = FW_PARAM_PFVF(STAG_END);
5969 params[2] = FW_PARAM_PFVF(RQ_START);
5970 params[3] = FW_PARAM_PFVF(RQ_END);
5971 params[4] = FW_PARAM_PFVF(PBL_START);
5972 params[5] = FW_PARAM_PFVF(PBL_END);
5973 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5974 params, val);
5975 if (ret < 0)
5976 goto bye;
5977 adap->vres.stag.start = val[0];
5978 adap->vres.stag.size = val[1] - val[0] + 1;
5979 adap->vres.rq.start = val[2];
5980 adap->vres.rq.size = val[3] - val[2] + 1;
5981 adap->vres.pbl.start = val[4];
5982 adap->vres.pbl.size = val[5] - val[4] + 1;
5983
5984 params[0] = FW_PARAM_PFVF(SQRQ_START);
5985 params[1] = FW_PARAM_PFVF(SQRQ_END);
5986 params[2] = FW_PARAM_PFVF(CQ_START);
5987 params[3] = FW_PARAM_PFVF(CQ_END);
5988 params[4] = FW_PARAM_PFVF(OCQ_START);
5989 params[5] = FW_PARAM_PFVF(OCQ_END);
5990 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5991 val);
5992 if (ret < 0)
5993 goto bye;
5994 adap->vres.qp.start = val[0];
5995 adap->vres.qp.size = val[1] - val[0] + 1;
5996 adap->vres.cq.start = val[2];
5997 adap->vres.cq.size = val[3] - val[2] + 1;
5998 adap->vres.ocq.start = val[4];
5999 adap->vres.ocq.size = val[5] - val[4] + 1;
6000
6001 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
6002 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
6003 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
6004 val);
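/* Fall back to fixed defaults if the firmware doesn't export these limits. */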
6005 if (ret < 0) {
6006 adap->params.max_ordird_qp = 8;
6007 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
6008 ret = 0;
6009 } else {
6010 adap->params.max_ordird_qp = val[0];
6011 adap->params.max_ird_adapter = val[1];
6012 }
6013 dev_info(adap->pdev_dev,
6014 "max_ordird_qp %d max_ird_adapter %d\n",
6015 adap->params.max_ordird_qp,
6016 adap->params.max_ird_adapter);
6017 }
6018 if (caps_cmd.iscsicaps) {
6019 params[0] = FW_PARAM_PFVF(ISCSI_START);
6020 params[1] = FW_PARAM_PFVF(ISCSI_END);
6021 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
6022 params, val);
6023 if (ret < 0)
6024 goto bye;
6025 adap->vres.iscsi.start = val[0];
6026 adap->vres.iscsi.size = val[1] - val[0] + 1;
6027 }
6028 #undef FW_PARAM_PFVF
6029 #undef FW_PARAM_DEV
6030
6031 /* The MTU/MSS Table is initialized by now, so load its values. If
6032 * we're initializing the adapter, then we'll make any modifications
6033 * we want to the MTU/MSS Table and also initialize the congestion
6034 * parameters.
6035 */
6036 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
6037 if (state != DEV_STATE_INIT) {
6038 int i;
6039
6040 /* The default MTU Table contains values 1492 and 1500.
6041 * However, for TCP, it's better to have two values which are
6042 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
6043 * This allows us to have a TCP Data Payload which is a
6044 * multiple of 8 regardless of what combination of TCP Options
6045 * are in use (always a multiple of 4 bytes) which is
6046 * important for performance reasons. For instance, if no
6047 * options are in use, then we have a 20-byte IP header and a
6048 * 20-byte TCP header. In this case, a 1500-byte MSS would
6049 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
6050 * which is not a multiple of 8. So using an MSS of 1488 in
6051 * this case results in a TCP Data Payload of 1448 bytes which
6052 * is a multiple of 8. On the other hand, if 12-byte TCP Time
6053 * Stamps have been negotiated, then an MTU of 1500 bytes
6054 * results in a TCP Data Payload of 1448 bytes which, as
6055 * above, is a multiple of 8 bytes ...
6056 */
6057 for (i = 0; i < NMTUS; i++)
6058 if (adap->params.mtus[i] == 1492) {
6059 adap->params.mtus[i] = 1488;
6060 break;
6061 }
6062
6063 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6064 adap->params.b_wnd);
6065 }
6066 t4_init_tp_params(adap);
6067 adap->flags |= FW_OK;
6068 return 0;
6069
6070 /*
6071 * Something bad happened. If a command timed out or failed with EIO,
6072 * the FW is not operating within its spec or something catastrophic
6073 * happened to the HW/FW, so stop issuing commands.
6074 */
6075 bye:
6076 if (ret != -ETIMEDOUT && ret != -EIO)
6077 t4_fw_bye(adap, adap->mbox);
6078 return ret;
6079 }
6080
6081 /* EEH callbacks */
6082
6083 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
6084 pci_channel_state_t state)
6085 {
6086 int i;
6087 struct adapter *adap = pci_get_drvdata(pdev);
6088
6089 if (!adap)
6090 goto out;
6091
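/* Mark the firmware unavailable, notify the ULDs, detach every port's
 * net device, and quiesce the adapter before the PCI function is
 * disabled below.
 */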
6092 rtnl_lock();
6093 adap->flags &= ~FW_OK;
6094 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
6095 spin_lock(&adap->stats_lock);
6096 for_each_port(adap, i) {
6097 struct net_device *dev = adap->port[i];
6098
6099 netif_device_detach(dev);
6100 netif_carrier_off(dev);
6101 }
6102 spin_unlock(&adap->stats_lock);
6103 if (adap->flags & FULL_INIT_DONE)
6104 cxgb_down(adap);
6105 rtnl_unlock();
6106 if ((adap->flags & DEV_ENABLED)) {
6107 pci_disable_device(pdev);
6108 adap->flags &= ~DEV_ENABLED;
6109 }
6110 out: return state == pci_channel_io_perm_failure ?
6111 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
6112 }
6113
6114 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
6115 {
6116 int i, ret;
6117 struct fw_caps_config_cmd c;
6118 struct adapter *adap = pci_get_drvdata(pdev);
6119
6120 if (!adap) {
6121 pci_restore_state(pdev);
6122 pci_save_state(pdev);
6123 return PCI_ERS_RESULT_RECOVERED;
6124 }
6125
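/* Re-enable the PCI function if it was disabled during error detection. */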
6126 if (!(adap->flags & DEV_ENABLED)) {
6127 if (pci_enable_device(pdev)) {
6128 dev_err(&pdev->dev, "Cannot reenable PCI "
6129 "device after reset\n");
6130 return PCI_ERS_RESULT_DISCONNECT;
6131 }
6132 adap->flags |= DEV_ENABLED;
6133 }
6134
6135 pci_set_master(pdev);
6136 pci_restore_state(pdev);
6137 pci_save_state(pdev);
6138 pci_cleanup_aer_uncorrect_error_status(pdev);
6139
6140 if (t4_wait_dev_ready(adap->regs) < 0)
6141 return PCI_ERS_RESULT_DISCONNECT;
6142 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
6143 return PCI_ERS_RESULT_DISCONNECT;
6144 adap->flags |= FW_OK;
6145 if (adap_init1(adap, &c))
6146 return PCI_ERS_RESULT_DISCONNECT;
6147
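/* Allocate a fresh virtual interface for each port now that the
 * function has been reset.
 */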
6148 for_each_port(adap, i) {
6149 struct port_info *p = adap2pinfo(adap, i);
6150
6151 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
6152 NULL, NULL);
6153 if (ret < 0)
6154 return PCI_ERS_RESULT_DISCONNECT;
6155 p->viid = ret;
6156 p->xact_addr_filt = -1;
6157 }
6158
6159 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6160 adap->params.b_wnd);
6161 setup_memwin(adap);
6162 if (cxgb_up(adap))
6163 return PCI_ERS_RESULT_DISCONNECT;
6164 return PCI_ERS_RESULT_RECOVERED;
6165 }
6166
6167 static void eeh_resume(struct pci_dev *pdev)
6168 {
6169 int i;
6170 struct adapter *adap = pci_get_drvdata(pdev);
6171
6172 if (!adap)
6173 return;
6174
6175 rtnl_lock();
6176 for_each_port(adap, i) {
6177 struct net_device *dev = adap->port[i];
6178
6179 if (netif_running(dev)) {
6180 link_start(dev);
6181 cxgb_set_rxmode(dev);
6182 }
6183 netif_device_attach(dev);
6184 }
6185 rtnl_unlock();
6186 }
6187
6188 static const struct pci_error_handlers cxgb4_eeh = {
6189 .error_detected = eeh_err_detected,
6190 .slot_reset = eeh_slot_reset,
6191 .resume = eeh_resume,
6192 };
6193
6194 static inline bool is_x_10g_port(const struct link_config *lc)
6195 {
6196 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6197 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
6198 }
6199
6200 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6201 unsigned int us, unsigned int cnt,
6202 unsigned int size, unsigned int iqe_size)
6203 {
6204 q->adap = adap;
6205 set_rspq_intr_params(q, us, cnt);
6206 q->iqe_len = iqe_size;
6207 q->size = size;
6208 }
6209
6210 /*
6211 * Perform default configuration of DMA queues depending on the number and type
6212 * of ports we found and the number of available CPUs. Most settings can be
6213 * modified by the admin prior to actual use.
6214 */
6215 static void cfg_queues(struct adapter *adap)
6216 {
6217 struct sge *s = &adap->sge;
6218 int i, n10g = 0, qidx = 0;
6219 #ifndef CONFIG_CHELSIO_T4_DCB
6220 int q10g = 0;
6221 #endif
6222 int ciq_size;
6223
6224 for_each_port(adap, i)
6225 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
6226 #ifdef CONFIG_CHELSIO_T4_DCB
6227 /* For Data Center Bridging support we need to be able to support up
6228 * to 8 Traffic Priorities; each of which will be assigned to its
6229 * own TX Queue in order to prevent Head-Of-Line Blocking.
6230 */
6231 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6232 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6233 MAX_ETH_QSETS, adap->params.nports * 8);
6234 BUG();
6235 }
6236
6237 for_each_port(adap, i) {
6238 struct port_info *pi = adap2pinfo(adap, i);
6239
6240 pi->first_qset = qidx;
6241 pi->nqsets = 8;
6242 qidx += pi->nqsets;
6243 }
6244 #else /* !CONFIG_CHELSIO_T4_DCB */
6245 /*
6246 * We default to 1 queue per non-10G port and up to as many queues
6247 * per 10G port as there are cores.
6248 */
6249 if (n10g)
6250 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
6251 if (q10g > netif_get_num_default_rss_queues())
6252 q10g = netif_get_num_default_rss_queues();
6253
6254 for_each_port(adap, i) {
6255 struct port_info *pi = adap2pinfo(adap, i);
6256
6257 pi->first_qset = qidx;
6258 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
6259 qidx += pi->nqsets;
6260 }
6261 #endif /* !CONFIG_CHELSIO_T4_DCB */
6262
6263 s->ethqsets = qidx;
6264 s->max_ethqsets = qidx; /* MSI-X may lower it later */
6265
6266 if (is_offload(adap)) {
6267 /*
6268 * For offload we use 1 queue/channel if all ports are up to 1G,
6269 * otherwise we divide all available queues amongst the channels
6270 * capped by the number of available cores.
6271 */
6272 if (n10g) {
6273 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6274 num_online_cpus());
6275 s->ofldqsets = roundup(i, adap->params.nports);
6276 } else
6277 s->ofldqsets = adap->params.nports;
6278 /* For RDMA one Rx queue per channel suffices */
6279 s->rdmaqs = adap->params.nports;
6280 s->rdmaciqs = adap->params.nports;
6281 }
6282
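/* Default Ethernet RX queue parameters: 5us interrupt holdoff, a
 * 10-packet count threshold, 1024 response-queue entries of 64B each,
 * and a small free list.
 */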
6283 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6284 struct sge_eth_rxq *r = &s->ethrxq[i];
6285
6286 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
6287 r->fl.size = 72;
6288 }
6289
6290 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6291 s->ethtxq[i].q.size = 1024;
6292
6293 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6294 s->ctrlq[i].q.size = 512;
6295
6296 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6297 s->ofldtxq[i].q.size = 1024;
6298
6299 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6300 struct sge_ofld_rxq *r = &s->ofldrxq[i];
6301
6302 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
6303 r->rspq.uld = CXGB4_ULD_ISCSI;
6304 r->fl.size = 72;
6305 }
6306
6307 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6308 struct sge_ofld_rxq *r = &s->rdmarxq[i];
6309
6310 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
6311 r->rspq.uld = CXGB4_ULD_RDMA;
6312 r->fl.size = 72;
6313 }
6314
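/* Size the RDMA concentrator IQs from the CQ resource region plus the
 * filter TID count, and clamp the result to the largest IQ the SGE
 * supports.
 */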
6315 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6316 if (ciq_size > SGE_MAX_IQ_SIZE) {
6317 CH_WARN(adap, "CIQ size too small for available IQs\n");
6318 ciq_size = SGE_MAX_IQ_SIZE;
6319 }
6320
6321 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6322 struct sge_ofld_rxq *r = &s->rdmaciq[i];
6323
6324 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
6325 r->rspq.uld = CXGB4_ULD_RDMA;
6326 }
6327
6328 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6329 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
6330 }
6331
6332 /*
6333 * Reduce the number of Ethernet queues across all ports to at most n.
6334 * n provides at least one queue per port.
6335 */
6336 static void reduce_ethqs(struct adapter *adap, int n)
6337 {
6338 int i;
6339 struct port_info *pi;
6340
6341 while (n < adap->sge.ethqsets)
6342 for_each_port(adap, i) {
6343 pi = adap2pinfo(adap, i);
6344 if (pi->nqsets > 1) {
6345 pi->nqsets--;
6346 adap->sge.ethqsets--;
6347 if (adap->sge.ethqsets <= n)
6348 break;
6349 }
6350 }
6351
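/* Renumber each port's first queue set now that the per-port counts
 * may have changed.
 */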
6352 n = 0;
6353 for_each_port(adap, i) {
6354 pi = adap2pinfo(adap, i);
6355 pi->first_qset = n;
6356 n += pi->nqsets;
6357 }
6358 }
6359
6360 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6361 #define EXTRA_VECS 2
6362
6363 static int enable_msix(struct adapter *adap)
6364 {
6365 int ofld_need = 0;
6366 int i, want, need;
6367 struct sge *s = &adap->sge;
6368 unsigned int nchan = adap->params.nports;
6369 struct msix_entry entries[MAX_INGQ + 1];
6370
6371 for (i = 0; i < ARRAY_SIZE(entries); ++i)
6372 entries[i].entry = i;
6373
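/* Ideally we'd like one vector per Ethernet queue set plus the extra
 * FW event/non-data vectors; offload queue requirements are added
 * below.
 */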
6374 want = s->max_ethqsets + EXTRA_VECS;
6375 if (is_offload(adap)) {
6376 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
6377 /* need nchan for each possible ULD */
6378 ofld_need = 3 * nchan;
6379 }
6380 #ifdef CONFIG_CHELSIO_T4_DCB
6381 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6382 * each port.
6383 */
6384 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6385 #else
6386 need = adap->params.nports + EXTRA_VECS + ofld_need;
6387 #endif
6388 want = pci_enable_msix_range(adap->pdev, entries, need, want);
6389 if (want < 0)
6390 return want;
6391
6392 /*
6393 * Distribute available vectors to the various queue groups.
6394 * Every group gets its minimum requirement and NIC gets top
6395 * priority for leftovers.
6396 */
6397 i = want - EXTRA_VECS - ofld_need;
6398 if (i < s->max_ethqsets) {
6399 s->max_ethqsets = i;
6400 if (i < s->ethqsets)
6401 reduce_ethqs(adap, i);
6402 }
6403 if (is_offload(adap)) {
6404 i = want - EXTRA_VECS - s->max_ethqsets;
6405 i -= ofld_need - nchan;
6406 s->ofldqsets = (i / nchan) * nchan; /* round down */
6407 }
6408 for (i = 0; i < want; ++i)
6409 adap->msix_info[i].vec = entries[i].vector;
6410
6411 return 0;
6412 }
6413
6414 #undef EXTRA_VECS
6415
6416 static int init_rss(struct adapter *adap)
6417 {
6418 unsigned int i, j;
6419
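/* Allocate each port's RSS indirection table and fill it with the
 * default even spread across that port's queue sets.
 */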
6420 for_each_port(adap, i) {
6421 struct port_info *pi = adap2pinfo(adap, i);
6422
6423 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6424 if (!pi->rss)
6425 return -ENOMEM;
6426 for (j = 0; j < pi->rss_size; j++)
6427 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
6428 }
6429 return 0;
6430 }
6431
6432 static void print_port_info(const struct net_device *dev)
6433 {
6434 char buf[80];
6435 char *bufp = buf;
6436 const char *spd = "";
6437 const struct port_info *pi = netdev_priv(dev);
6438 const struct adapter *adap = pi->adapter;
6439
6440 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6441 spd = " 2.5 GT/s";
6442 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6443 spd = " 5 GT/s";
6444 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6445 spd = " 8 GT/s";
6446
6447 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6448 bufp += sprintf(bufp, "100/");
6449 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6450 bufp += sprintf(bufp, "1000/");
6451 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6452 bufp += sprintf(bufp, "10G/");
6453 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6454 bufp += sprintf(bufp, "40G/");
6455 if (bufp != buf)
6456 --bufp;
6457 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6458
6459 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6460 adap->params.vpd.id,
6461 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6462 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6463 (adap->flags & USING_MSIX) ? " MSI-X" :
6464 (adap->flags & USING_MSI) ? " MSI" : "");
6465 netdev_info(dev, "S/N: %s, P/N: %s\n",
6466 adap->params.vpd.sn, adap->params.vpd.pn);
6467 }
6468
6469 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6470 {
6471 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6472 }
6473
6474 /*
6475 * Free the following resources:
6476 * - memory used for tables
6477 * - MSI/MSI-X
6478 * - net devices
6479 * - resources FW is holding for us
6480 */
6481 static void free_some_resources(struct adapter *adapter)
6482 {
6483 unsigned int i;
6484
6485 t4_free_mem(adapter->l2t);
6486 t4_free_mem(adapter->tids.tid_tab);
6487 disable_msi(adapter);
6488
6489 for_each_port(adapter, i)
6490 if (adapter->port[i]) {
6491 kfree(adap2pinfo(adapter, i)->rss);
6492 free_netdev(adapter->port[i]);
6493 }
6494 if (adapter->flags & FW_OK)
6495 t4_fw_bye(adapter, adapter->fn);
6496 }
6497
6498 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6499 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6500 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6501 #define SEGMENT_SIZE 128
6502
6503 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6504 {
6505 int func, i, err, s_qpp, qpp, num_seg;
6506 struct port_info *pi;
6507 bool highdma = false;
6508 struct adapter *adapter = NULL;
6509 void __iomem *regs;
6510
6511 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6512
6513 err = pci_request_regions(pdev, KBUILD_MODNAME);
6514 if (err) {
6515 /* Just info, some other driver may have claimed the device. */
6516 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6517 return err;
6518 }
6519
6520 err = pci_enable_device(pdev);
6521 if (err) {
6522 dev_err(&pdev->dev, "cannot enable PCI device\n");
6523 goto out_release_regions;
6524 }
6525
6526 regs = pci_ioremap_bar(pdev, 0);
6527 if (!regs) {
6528 dev_err(&pdev->dev, "cannot map device registers\n");
6529 err = -ENOMEM;
6530 goto out_disable_device;
6531 }
6532
6533 err = t4_wait_dev_ready(regs);
6534 if (err < 0)
6535 goto out_unmap_bar0;
6536
6537 /* We control everything through one PF */
6538 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6539 if (func != ent->driver_data) {
6540 iounmap(regs);
6541 pci_disable_device(pdev);
6542 pci_save_state(pdev); /* to restore SR-IOV later */
6543 goto sriov;
6544 }
6545
6546 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6547 highdma = true;
6548 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6549 if (err) {
6550 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6551 "coherent allocations\n");
6552 goto out_unmap_bar0;
6553 }
6554 } else {
6555 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6556 if (err) {
6557 dev_err(&pdev->dev, "no usable DMA configuration\n");
6558 goto out_unmap_bar0;
6559 }
6560 }
6561
6562 pci_enable_pcie_error_reporting(pdev);
6563 enable_pcie_relaxed_ordering(pdev);
6564 pci_set_master(pdev);
6565 pci_save_state(pdev);
6566
6567 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6568 if (!adapter) {
6569 err = -ENOMEM;
6570 goto out_unmap_bar0;
6571 }
6572
6573 adapter->workq = create_singlethread_workqueue("cxgb4");
6574 if (!adapter->workq) {
6575 err = -ENOMEM;
6576 goto out_free_adapter;
6577 }
6578
6579 /* PCI device has been enabled */
6580 adapter->flags |= DEV_ENABLED;
6581
6582 adapter->regs = regs;
6583 adapter->pdev = pdev;
6584 adapter->pdev_dev = &pdev->dev;
6585 adapter->mbox = func;
6586 adapter->fn = func;
6587 adapter->msg_enable = dflt_msg_enable;
6588 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6589
6590 spin_lock_init(&adapter->stats_lock);
6591 spin_lock_init(&adapter->tid_release_lock);
6592
6593 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6594 INIT_WORK(&adapter->db_full_task, process_db_full);
6595 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6596
6597 err = t4_prep_adapter(adapter);
6598 if (err)
6599 goto out_free_adapter;
6600
6601
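/* On T5 and later the SGE doorbells can be written through BAR2, so
 * it is mapped write-combined to let doorbell writes be coalesced.
 */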
6602 if (!is_t4(adapter->params.chip)) {
6603 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6604 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6605 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6606 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6607
6608 /* Each segment is 128B in size. Write coalescing is enabled only
6609 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
6610 * queue is less than the number of segments that can be
6611 * accommodated in a page.
6612 */
6613 if (qpp > num_seg) {
6614 dev_err(&pdev->dev,
6615 "Incorrect number of egress queues per page\n");
6616 err = -EINVAL;
6617 goto out_free_adapter;
6618 }
6619 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6620 pci_resource_len(pdev, 2));
6621 if (!adapter->bar2) {
6622 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6623 err = -ENOMEM;
6624 goto out_free_adapter;
6625 }
6626 }
6627
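/* Set up the PCI-E memory windows and run phase-0 initialization
 * before the RDMA memory window, which depends on resources learned
 * in adap_init0().
 */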
6628 setup_memwin(adapter);
6629 err = adap_init0(adapter);
6630 setup_memwin_rdma(adapter);
6631 if (err)
6632 goto out_unmap_bar;
6633
6634 for_each_port(adapter, i) {
6635 struct net_device *netdev;
6636
6637 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6638 MAX_ETH_QSETS);
6639 if (!netdev) {
6640 err = -ENOMEM;
6641 goto out_free_dev;
6642 }
6643
6644 SET_NETDEV_DEV(netdev, &pdev->dev);
6645
6646 adapter->port[i] = netdev;
6647 pi = netdev_priv(netdev);
6648 pi->adapter = adapter;
6649 pi->xact_addr_filt = -1;
6650 pi->port_id = i;
6651 netdev->irq = pdev->irq;
6652
6653 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6654 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6655 NETIF_F_RXCSUM | NETIF_F_RXHASH |
6656 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6657 if (highdma)
6658 netdev->hw_features |= NETIF_F_HIGHDMA;
6659 netdev->features |= netdev->hw_features;
6660 netdev->vlan_features = netdev->features & VLAN_FEAT;
6661
6662 netdev->priv_flags |= IFF_UNICAST_FLT;
6663
6664 netdev->netdev_ops = &cxgb4_netdev_ops;
6665 #ifdef CONFIG_CHELSIO_T4_DCB
6666 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6667 cxgb4_dcb_state_init(netdev);
6668 #endif
6669 netdev->ethtool_ops = &cxgb_ethtool_ops;
6670 }
6671
6672 pci_set_drvdata(pdev, adapter);
6673
6674 if (adapter->flags & FW_OK) {
6675 err = t4_port_init(adapter, func, func, 0);
6676 if (err)
6677 goto out_free_dev;
6678 }
6679
6680 /*
6681 * Configure queues and allocate tables now, they can be needed as
6682 * soon as the first register_netdev completes.
6683 */
6684 cfg_queues(adapter);
6685
6686 adapter->l2t = t4_init_l2t();
6687 if (!adapter->l2t) {
6688 /* We tolerate a lack of L2T, giving up some functionality */
6689 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6690 adapter->params.offload = 0;
6691 }
6692
6693 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6694 dev_warn(&pdev->dev, "could not allocate TID table, "
6695 "continuing\n");
6696 adapter->params.offload = 0;
6697 }
6698
6699 /* See what interrupts we'll be using */
6700 if (msi > 1 && enable_msix(adapter) == 0)
6701 adapter->flags |= USING_MSIX;
6702 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6703 adapter->flags |= USING_MSI;
6704
6705 err = init_rss(adapter);
6706 if (err)
6707 goto out_free_dev;
6708
6709 /*
6710 * The card is now ready to go. If any errors occur during device
6711 * registration we do not fail the whole card but rather proceed only
6712 * with the ports we manage to register successfully. However we must
6713 * register at least one net device.
6714 */
6715 for_each_port(adapter, i) {
6716 pi = adap2pinfo(adapter, i);
6717 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6718 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6719
6720 err = register_netdev(adapter->port[i]);
6721 if (err)
6722 break;
6723 adapter->chan_map[pi->tx_chan] = i;
6724 print_port_info(adapter->port[i]);
6725 }
6726 if (i == 0) {
6727 dev_err(&pdev->dev, "could not register any net devices\n");
6728 goto out_free_dev;
6729 }
6730 if (err) {
6731 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6732 err = 0;
6733 }
6734
6735 if (cxgb4_debugfs_root) {
6736 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6737 cxgb4_debugfs_root);
6738 setup_debugfs(adapter);
6739 }
6740
6741 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6742 pdev->needs_freset = 1;
6743
6744 if (is_offload(adapter))
6745 attach_ulds(adapter);
6746
6747 sriov:
6748 #ifdef CONFIG_PCI_IOV
6749 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6750 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6751 dev_info(&pdev->dev,
6752 "instantiated %u virtual functions\n",
6753 num_vf[func]);
6754 #endif
6755 return 0;
6756
6757 out_free_dev:
6758 free_some_resources(adapter);
6759 out_unmap_bar:
6760 if (!is_t4(adapter->params.chip))
6761 iounmap(adapter->bar2);
6762 out_free_adapter:
6763 if (adapter->workq)
6764 destroy_workqueue(adapter->workq);
6765
6766 kfree(adapter);
6767 out_unmap_bar0:
6768 iounmap(regs);
6769 out_disable_device:
6770 pci_disable_pcie_error_reporting(pdev);
6771 pci_disable_device(pdev);
6772 out_release_regions:
6773 pci_release_regions(pdev);
6774 return err;
6775 }
6776
6777 static void remove_one(struct pci_dev *pdev)
6778 {
6779 struct adapter *adapter = pci_get_drvdata(pdev);
6780
6781 #ifdef CONFIG_PCI_IOV
6782 pci_disable_sriov(pdev);
6783
6784 #endif
6785
6786 if (adapter) {
6787 int i;
6788
6789 /* Tear down per-adapter Work Queue first since it can contain
6790 * references to our adapter data structure.
6791 */
6792 destroy_workqueue(adapter->workq);
6793
6794 if (is_offload(adapter))
6795 detach_ulds(adapter);
6796
6797 for_each_port(adapter, i)
6798 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6799 unregister_netdev(adapter->port[i]);
6800
6801 debugfs_remove_recursive(adapter->debugfs_root);
6802
6803 /* If we allocated filters, free up state associated with any
6804 * valid filters ...
6805 */
6806 if (adapter->tids.ftid_tab) {
6807 struct filter_entry *f = &adapter->tids.ftid_tab[0];
6808 for (i = 0; i < (adapter->tids.nftids +
6809 adapter->tids.nsftids); i++, f++)
6810 if (f->valid)
6811 clear_filter(adapter, f);
6812 }
6813
6814 if (adapter->flags & FULL_INIT_DONE)
6815 cxgb_down(adapter);
6816
6817 free_some_resources(adapter);
6818 iounmap(adapter->regs);
6819 if (!is_t4(adapter->params.chip))
6820 iounmap(adapter->bar2);
6821 pci_disable_pcie_error_reporting(pdev);
6822 if ((adapter->flags & DEV_ENABLED)) {
6823 pci_disable_device(pdev);
6824 adapter->flags &= ~DEV_ENABLED;
6825 }
6826 pci_release_regions(pdev);
6827 synchronize_rcu();
6828 kfree(adapter);
6829 } else
6830 pci_release_regions(pdev);
6831 }
6832
6833 static struct pci_driver cxgb4_driver = {
6834 .name = KBUILD_MODNAME,
6835 .id_table = cxgb4_pci_tbl,
6836 .probe = init_one,
6837 .remove = remove_one,
6838 .shutdown = remove_one,
6839 .err_handler = &cxgb4_eeh,
6840 };
6841
6842 static int __init cxgb4_init_module(void)
6843 {
6844 int ret;
6845
6846 /* Debugfs support is optional, just warn if this fails */
6847 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6848 if (!cxgb4_debugfs_root)
6849 pr_warn("could not create debugfs entry, continuing\n");
6850
6851 ret = pci_register_driver(&cxgb4_driver);
6852 if (ret < 0)
6853 debugfs_remove(cxgb4_debugfs_root);
6854
6855 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6856
6857 return ret;
6858 }
6859
6860 static void __exit cxgb4_cleanup_module(void)
6861 {
6862 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6863 pci_unregister_driver(&cxgb4_driver);
6864 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6865 }
6866
6867 module_init(cxgb4_init_module);
6868 module_exit(cxgb4_cleanup_module);