qlcnic: fix chip reset logic
drivers/net/qlcnic/qlcnic_main.c
/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>

#include "qlcnic.h"

#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static struct workqueue_struct *qlcnic_wq;
static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0444);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");

static int use_msi = 1;
module_param(use_msi, int, 0444);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");

static int use_msi_x = 1;
module_param(use_msi_x, int, 0444);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");

static int auto_fw_reset = 1;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int load_fw_file;
module_param(load_fw_file, int, 0444);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");

static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0444);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");

static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);

static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
		struct qlcnic_esw_func_cfg *);
static void qlcnic_vlan_rx_add(struct net_device *, u16);
static void qlcnic_vlan_rx_del(struct net_device *, u16);

/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X  0x8020

static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);

inline void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
}

static const u32 msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;

static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	writel(0, sds_ring->crb_intr_mask);
}

static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	writel(0x1, sds_ring->crb_intr_mask);

	if (!QLCNIC_IS_MSI_FAMILY(adapter))
		writel(0xfbff, adapter->tgt_mask_reg);
}

static int
qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
{
	int size = sizeof(struct qlcnic_host_sds_ring) * count;

	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);

	return recv_ctx->sds_rings == NULL;
}

static void
qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
{
	if (recv_ctx->sds_rings != NULL)
		kfree(recv_ctx->sds_rings);

	recv_ctx->sds_rings = NULL;
}

static int
qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi,
				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
	}

	return 0;
}

static void
qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(adapter->recv_ctx);
}

static void
qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

static void
qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

249static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
250{
251 u32 control;
252 int pos;
253
254 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
255 if (pos) {
256 pci_read_config_dword(pdev, pos, &control);
257 if (enable)
258 control |= PCI_MSIX_FLAGS_ENABLE;
259 else
260 control = 0;
261 pci_write_config_dword(pdev, pos, control);
262 }
263}
264
265static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
266{
267 int i;
268
269 for (i = 0; i < count; i++)
270 adapter->msix_entries[i].entry = i;
271}
272
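/*
 * Read the MAC address reported by firmware and program it into the netdev
 * (dev_addr, perm_addr) and the adapter's cached copy. A warning is logged
 * if the returned address is not a valid Ethernet address.
 */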
273static int
274qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
275{
2e9d722d 276 u8 mac_addr[ETH_ALEN];
af19b491
AKS
277 struct net_device *netdev = adapter->netdev;
278 struct pci_dev *pdev = adapter->pdev;
279
da48e6c3 280 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
af19b491
AKS
281 return -EIO;
282
2e9d722d 283 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
af19b491
AKS
284 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
285 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
286
287 /* set station address */
288
289 if (!is_valid_ether_addr(netdev->perm_addr))
290 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
291 netdev->dev_addr);
292
293 return 0;
294}
295
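/*
 * .ndo_set_mac_address handler: the change is refused while the eswitch has
 * MAC override disabled; otherwise the device is briefly quiesced while the
 * new address is programmed.
 */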
296static int qlcnic_set_mac(struct net_device *netdev, void *p)
297{
298 struct qlcnic_adapter *adapter = netdev_priv(netdev);
299 struct sockaddr *addr = p;
300
7373373d
RB
301 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
302 return -EOPNOTSUPP;
303
af19b491
AKS
304 if (!is_valid_ether_addr(addr->sa_data))
305 return -EINVAL;
306
8a15ad1f 307 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
308 netif_device_detach(netdev);
309 qlcnic_napi_disable(adapter);
310 }
311
312 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
313 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
314 qlcnic_set_multi(adapter->netdev);
315
8a15ad1f 316 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
317 netif_device_attach(netdev);
318 qlcnic_napi_enable(adapter);
319 }
320 return 0;
321}
322
323static const struct net_device_ops qlcnic_netdev_ops = {
324 .ndo_open = qlcnic_open,
325 .ndo_stop = qlcnic_close,
326 .ndo_start_xmit = qlcnic_xmit_frame,
327 .ndo_get_stats = qlcnic_get_stats,
328 .ndo_validate_addr = eth_validate_addr,
329 .ndo_set_multicast_list = qlcnic_set_multi,
330 .ndo_set_mac_address = qlcnic_set_mac,
331 .ndo_change_mtu = qlcnic_change_mtu,
135d84a9
MM
332 .ndo_fix_features = qlcnic_fix_features,
333 .ndo_set_features = qlcnic_set_features,
af19b491 334 .ndo_tx_timeout = qlcnic_tx_timeout,
b9796a14
AC
335 .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
336 .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
af19b491
AKS
337#ifdef CONFIG_NET_POLL_CONTROLLER
338 .ndo_poll_controller = qlcnic_poll_controller,
339#endif
340};
341
2e9d722d 342static struct qlcnic_nic_template qlcnic_ops = {
2e9d722d
AC
343 .config_bridged_mode = qlcnic_config_bridged_mode,
344 .config_led = qlcnic_config_led,
9f26f547
AC
345 .start_firmware = qlcnic_start_firmware
346};
347
348static struct qlcnic_nic_template qlcnic_vf_ops = {
9f26f547
AC
349 .config_bridged_mode = qlcnicvf_config_bridged_mode,
350 .config_led = qlcnicvf_config_led,
9f26f547 351 .start_firmware = qlcnicvf_start_firmware
2e9d722d
AC
352};
353
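/*
 * Try to enable num_msix MSI-X vectors. If pci_enable_msix() indicates that
 * only a smaller number of vectors is available, retry with the largest
 * power of two that fits; each vector backs one SDS (status/receive) ring.
 */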
f94bc1e7 354static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
af19b491 355{
af19b491 356 struct pci_dev *pdev = adapter->pdev;
f94bc1e7 357 int err = -1;
af19b491
AKS
358
359 adapter->max_sds_rings = 1;
af19b491 360 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
af19b491
AKS
361 qlcnic_set_msix_bit(pdev, 0);
362
363 if (adapter->msix_supported) {
f94bc1e7 364 enable_msix:
af19b491
AKS
365 qlcnic_init_msix_entries(adapter, num_msix);
366 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
367 if (err == 0) {
368 adapter->flags |= QLCNIC_MSIX_ENABLED;
369 qlcnic_set_msix_bit(pdev, 1);
370
b1fc6d3c 371 adapter->max_sds_rings = num_msix;
af19b491
AKS
372
373 dev_info(&pdev->dev, "using msi-x interrupts\n");
f94bc1e7 374 return err;
af19b491 375 }
f94bc1e7
SC
376 if (err > 0) {
377 num_msix = rounddown_pow_of_two(err);
378 if (num_msix)
379 goto enable_msix;
380 }
381 }
382 return err;
383}
af19b491 384
af19b491 385
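/*
 * MSI-X is not available: fall back to MSI if the "use_msi" module parameter
 * allows it, otherwise set up the legacy INTx status/mask registers for this
 * PCI function.
 */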
f94bc1e7
SC
386static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
387{
388 const struct qlcnic_legacy_intr_set *legacy_intrp;
389 struct pci_dev *pdev = adapter->pdev;
af19b491
AKS
390
391 if (use_msi && !pci_enable_msi(pdev)) {
392 adapter->flags |= QLCNIC_MSI_ENABLED;
393 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
b1fc6d3c 394 msi_tgt_status[adapter->ahw->pci_func]);
af19b491
AKS
395 dev_info(&pdev->dev, "using msi interrupts\n");
396 adapter->msix_entries[0].vector = pdev->irq;
397 return;
398 }
399
f94bc1e7
SC
400 legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
401
402 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
403 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
404 legacy_intrp->tgt_status_reg);
405 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
406 legacy_intrp->tgt_mask_reg);
407 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
408
409 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
410 ISR_INT_STATE_REG);
af19b491
AKS
411 dev_info(&pdev->dev, "using legacy interrupts\n");
412 adapter->msix_entries[0].vector = pdev->irq;
413}
414
f94bc1e7
SC
415static void
416qlcnic_setup_intr(struct qlcnic_adapter *adapter)
417{
418 int num_msix;
419
420 if (adapter->msix_supported) {
5f6ec29a
SC
421 num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
422 QLCNIC_DEF_NUM_STS_DESC_RINGS));
f94bc1e7
SC
423 } else
424 num_msix = 1;
425
426 if (!qlcnic_enable_msix(adapter, num_msix))
427 return;
428
429 qlcnic_enable_msi_legacy(adapter);
430}
431
af19b491
AKS
432static void
433qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
434{
435 if (adapter->flags & QLCNIC_MSIX_ENABLED)
436 pci_disable_msix(adapter->pdev);
437 if (adapter->flags & QLCNIC_MSI_ENABLED)
438 pci_disable_msi(adapter->pdev);
439}
440
441static void
442qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
443{
b1fc6d3c
AC
444 if (adapter->ahw->pci_base0 != NULL)
445 iounmap(adapter->ahw->pci_base0);
af19b491
AKS
446}
447
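/*
 * Allocate the per-function NPAR and eswitch tables and populate them from
 * the firmware's PCI info; called for the management function during eswitch
 * mode setup.
 */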
346fe763
RB
448static int
449qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
450{
e88db3bd 451 struct qlcnic_pci_info *pci_info;
900853a4 452 int i, ret = 0;
346fe763
RB
453 u8 pfn;
454
e88db3bd
DC
455 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
456 if (!pci_info)
457 return -ENOMEM;
458
ca315ac2 459 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 460 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 461 if (!adapter->npars) {
900853a4 462 ret = -ENOMEM;
e88db3bd
DC
463 goto err_pci_info;
464 }
346fe763 465
ca315ac2 466 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
346fe763
RB
467 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
468 if (!adapter->eswitch) {
900853a4 469 ret = -ENOMEM;
ca315ac2 470 goto err_npars;
346fe763
RB
471 }
472
473 ret = qlcnic_get_pci_info(adapter, pci_info);
ca315ac2
DC
474 if (ret)
475 goto err_eswitch;
346fe763 476
ca315ac2
DC
477 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
478 pfn = pci_info[i].id;
f848d6dd
SC
479 if (pfn > QLCNIC_MAX_PCI_FUNC) {
480 ret = QL_STATUS_INVALID_PARAM;
481 goto err_eswitch;
482 }
a1c0c459
SC
483 adapter->npars[pfn].active = (u8)pci_info[i].active;
484 adapter->npars[pfn].type = (u8)pci_info[i].type;
485 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
ca315ac2
DC
486 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
487 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
346fe763
RB
488 }
489
ca315ac2
DC
490 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
491 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
492
e88db3bd 493 kfree(pci_info);
ca315ac2
DC
494 return 0;
495
496err_eswitch:
346fe763
RB
497 kfree(adapter->eswitch);
498 adapter->eswitch = NULL;
ca315ac2 499err_npars:
346fe763 500 kfree(adapter->npars);
ca315ac2 501 adapter->npars = NULL;
e88db3bd
DC
502err_pci_info:
503 kfree(pci_info);
346fe763
RB
504
505 return ret;
506}
507
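/*
 * Program the driver operating mode (privilege level) of the other NIC
 * functions into the QLCNIC_DRV_OP_MODE register, under the firmware API
 * lock. Honors the qlcnic_config_npars module parameter.
 */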
2e9d722d
AC
508static int
509qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
510{
511 u8 id;
512 u32 ref_count;
513 int i, ret = 1;
514 u32 data = QLCNIC_MGMT_FUNC;
b1fc6d3c 515 void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
2e9d722d
AC
516
517 /* If other drivers are not in use set their privilege level */
31018e06 518 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2e9d722d
AC
519 ret = qlcnic_api_lock(adapter);
520 if (ret)
521 goto err_lock;
2e9d722d 522
0e33c664
AC
523 if (qlcnic_config_npars) {
524 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 525 id = i;
0e33c664 526 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
b1fc6d3c 527 id == adapter->ahw->pci_func)
0e33c664
AC
528 continue;
529 data |= (qlcnic_config_npars &
530 QLC_DEV_SET_DRV(0xf, id));
531 }
532 } else {
533 data = readl(priv_op);
b1fc6d3c 534 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
0e33c664 535 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
b1fc6d3c 536 adapter->ahw->pci_func));
2e9d722d
AC
537 }
538 writel(data, priv_op);
2e9d722d
AC
539 qlcnic_api_unlock(adapter);
540err_lock:
541 return ret;
542}
543
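/*
 * Determine the firmware HAL version, this function's PCI function number
 * and its privilege level. Non-privileged functions use the qlcnic_vf_ops
 * callbacks instead of qlcnic_ops.
 */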
0866d96d
AC
544static void
545qlcnic_check_vf(struct qlcnic_adapter *adapter)
2e9d722d
AC
546{
547 void __iomem *msix_base_addr;
548 void __iomem *priv_op;
549 u32 func;
550 u32 msix_base;
551 u32 op_mode, priv_level;
552
553 /* Determine FW API version */
b1fc6d3c
AC
554 adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
555 QLCNIC_FW_API);
2e9d722d
AC
556
557 /* Find PCI function number */
558 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
b1fc6d3c 559 msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
2e9d722d
AC
560 msix_base = readl(msix_base_addr);
561 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
b1fc6d3c 562 adapter->ahw->pci_func = func;
2e9d722d
AC
563
564 /* Determine function privilege level */
b1fc6d3c 565 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
2e9d722d 566 op_mode = readl(priv_op);
0e33c664 567 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 568 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 569 else
b1fc6d3c 570 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
2e9d722d 571
0866d96d 572 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
9f26f547
AC
573 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
574 dev_info(&adapter->pdev->dev,
575 "HAL Version: %d Non Privileged function\n",
576 adapter->fw_hal_version);
577 adapter->nic_ops = &qlcnic_vf_ops;
0866d96d
AC
578 } else
579 adapter->nic_ops = &qlcnic_ops;
2e9d722d
AC
580}
581
af19b491
AKS
582static int
583qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
584{
585 void __iomem *mem_ptr0 = NULL;
586 resource_size_t mem_base;
587 unsigned long mem_len, pci_len0 = 0;
588
589 struct pci_dev *pdev = adapter->pdev;
af19b491 590
af19b491
AKS
591 /* remap phys address */
592 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
593 mem_len = pci_resource_len(pdev, 0);
594
595 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
596
597 mem_ptr0 = pci_ioremap_bar(pdev, 0);
598 if (mem_ptr0 == NULL) {
599 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
600 return -EIO;
601 }
602 pci_len0 = mem_len;
603 } else {
604 return -EIO;
605 }
606
607 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
608
b1fc6d3c
AC
609 adapter->ahw->pci_base0 = mem_ptr0;
610 adapter->ahw->pci_len0 = pci_len0;
af19b491 611
0866d96d 612 qlcnic_check_vf(adapter);
2e9d722d 613
b1fc6d3c
AC
614 adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
615 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
616 adapter->ahw->pci_func)));
af19b491
AKS
617
618 return 0;
619}
620
621static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
622{
623 struct pci_dev *pdev = adapter->pdev;
624 int i, found = 0;
625
626 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
627 if (qlcnic_boards[i].vendor == pdev->vendor &&
628 qlcnic_boards[i].device == pdev->device &&
629 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
630 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
02f6e46f
SC
631 sprintf(name, "%pM: %s" ,
632 adapter->mac_addr,
633 qlcnic_boards[i].short_name);
af19b491
AKS
634 found = 1;
635 break;
636 }
637
638 }
639
640 if (!found)
7f9a0c34 641 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
af19b491
AKS
642}
643
644static void
645qlcnic_check_options(struct qlcnic_adapter *adapter)
646{
647 u32 fw_major, fw_minor, fw_build;
af19b491 648 struct pci_dev *pdev = adapter->pdev;
af19b491
AKS
649
650 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
651 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
652 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
653
654 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
655
251a84c9
AKS
656 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
657 fw_major, fw_minor, fw_build);
b1fc6d3c 658 if (adapter->ahw->port_type == QLCNIC_XGBE) {
90d19005
SC
659 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
660 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
661 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
662 } else {
663 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
664 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
665 }
666
af19b491 667 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
90d19005
SC
668 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
669
b1fc6d3c 670 } else if (adapter->ahw->port_type == QLCNIC_GBE) {
af19b491
AKS
671 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
672 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
90d19005
SC
673 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
674 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
af19b491
AKS
675 }
676
677 adapter->msix_supported = !!use_msi_x;
af19b491
AKS
678
679 adapter->num_txd = MAX_CMD_DESCRIPTORS;
680
251b036a 681 adapter->max_rds_rings = MAX_RDS_RINGS;
af19b491
AKS
682}
683
174240a8
RB
684static int
685qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
686{
687 int err;
688 struct qlcnic_info nic_info;
689
b1fc6d3c 690 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
174240a8
RB
691 if (err)
692 return err;
693
a1c0c459 694 adapter->physical_port = (u8)nic_info.phys_port;
174240a8
RB
695 adapter->switch_mode = nic_info.switch_mode;
696 adapter->max_tx_ques = nic_info.max_tx_ques;
697 adapter->max_rx_ques = nic_info.max_rx_ques;
698 adapter->capabilities = nic_info.capabilities;
699 adapter->max_mac_filters = nic_info.max_mac_filters;
700 adapter->max_mtu = nic_info.max_mtu;
701
702 if (adapter->capabilities & BIT_6)
703 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
704 else
705 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
706
707 return err;
708}
709
8cf61f89
AKS
710static void
711qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
712 struct qlcnic_esw_func_cfg *esw_cfg)
713{
714 if (esw_cfg->discard_tagged)
715 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
716 else
717 adapter->flags |= QLCNIC_TAGGING_ENABLED;
718
719 if (esw_cfg->vlan_id)
720 adapter->pvid = esw_cfg->vlan_id;
721 else
722 adapter->pvid = 0;
723}
724
b9796a14
AC
725static void
726qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
727{
728 struct qlcnic_adapter *adapter = netdev_priv(netdev);
729 set_bit(vid, adapter->vlans);
730}
731
732static void
733qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
734{
735 struct qlcnic_adapter *adapter = netdev_priv(netdev);
736
737 qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
738 clear_bit(vid, adapter->vlans);
739}
740
0325d69b
RB
741static void
742qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
743 struct qlcnic_esw_func_cfg *esw_cfg)
744{
ee07c1a7
RB
745 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
746 QLCNIC_PROMISC_DISABLED);
7613c87b
RB
747
748 if (esw_cfg->mac_anti_spoof)
749 adapter->flags |= QLCNIC_MACSPOOF;
fe4d434d 750
7373373d
RB
751 if (!esw_cfg->mac_override)
752 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
753
ee07c1a7
RB
754 if (!esw_cfg->promisc_mode)
755 adapter->flags |= QLCNIC_PROMISC_DISABLED;
756
0325d69b
RB
757 qlcnic_set_netdev_features(adapter, esw_cfg);
758}
759
760static int
761qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
762{
763 struct qlcnic_esw_func_cfg esw_cfg;
764
765 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
766 return 0;
767
b1fc6d3c 768 esw_cfg.pci_func = adapter->ahw->pci_func;
0325d69b
RB
769 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
770 return -EIO;
8cf61f89 771 qlcnic_set_vlan_config(adapter, &esw_cfg);
0325d69b
RB
772 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
773
774 return 0;
775}
776
777static void
778qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
779 struct qlcnic_esw_func_cfg *esw_cfg)
780{
781 struct net_device *netdev = adapter->netdev;
782 unsigned long features, vlan_features;
783
135d84a9 784 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
0325d69b
RB
785 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
786 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
b9796a14 787 NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
0325d69b
RB
788
789 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
790 features |= (NETIF_F_TSO | NETIF_F_TSO6);
791 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
792 }
b56421d0
RB
793
794 if (netdev->features & NETIF_F_LRO)
0325d69b
RB
795 features |= NETIF_F_LRO;
796
797 if (esw_cfg->offload_flags & BIT_0) {
798 netdev->features |= features;
0325d69b
RB
799 if (!(esw_cfg->offload_flags & BIT_1))
800 netdev->features &= ~NETIF_F_TSO;
801 if (!(esw_cfg->offload_flags & BIT_2))
802 netdev->features &= ~NETIF_F_TSO6;
803 } else {
804 netdev->features &= ~features;
0325d69b
RB
805 }
806
807 netdev->vlan_features = (features & vlan_features);
808}
809
0866d96d
AC
810static int
811qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
812{
813 void __iomem *priv_op;
814 u32 op_mode, priv_level;
815 int err = 0;
816
174240a8
RB
817 err = qlcnic_initialize_nic(adapter);
818 if (err)
819 return err;
820
0866d96d
AC
821 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
822 return 0;
823
b1fc6d3c 824 priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
0866d96d 825 op_mode = readl(priv_op);
b1fc6d3c 826 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
0866d96d
AC
827
828 if (op_mode == QLC_DEV_DRV_DEFAULT)
829 priv_level = QLCNIC_MGMT_FUNC;
830 else
b1fc6d3c 831 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
0866d96d 832
174240a8 833 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
0866d96d
AC
834 if (priv_level == QLCNIC_MGMT_FUNC) {
835 adapter->op_mode = QLCNIC_MGMT_FUNC;
836 err = qlcnic_init_pci_info(adapter);
837 if (err)
838 return err;
839 /* Set privilege level for other functions */
840 qlcnic_set_function_modes(adapter);
841 dev_info(&adapter->pdev->dev,
842 "HAL Version: %d, Management function\n",
843 adapter->fw_hal_version);
844 } else if (priv_level == QLCNIC_PRIV_FUNC) {
845 adapter->op_mode = QLCNIC_PRIV_FUNC;
846 dev_info(&adapter->pdev->dev,
847 "HAL Version: %d, Privileged function\n",
848 adapter->fw_hal_version);
849 }
174240a8 850 }
0866d96d
AC
851
852 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
853
854 return err;
855}
856
0325d69b
RB
857static int
858qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
859{
860 struct qlcnic_esw_func_cfg esw_cfg;
861 struct qlcnic_npar_info *npar;
862 u8 i;
863
174240a8 864 if (adapter->need_fw_reset)
0325d69b
RB
865 return 0;
866
867 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
868 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
869 continue;
870 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
871 esw_cfg.pci_func = i;
872 esw_cfg.offload_flags = BIT_0;
7373373d 873 esw_cfg.mac_override = BIT_0;
ee07c1a7 874 esw_cfg.promisc_mode = BIT_0;
0325d69b
RB
875 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
876 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
877 if (qlcnic_config_switch_port(adapter, &esw_cfg))
878 return -EIO;
879 npar = &adapter->npars[i];
880 npar->pvid = esw_cfg.vlan_id;
7373373d 881 npar->mac_override = esw_cfg.mac_override;
0325d69b
RB
882 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
883 npar->discard_tagged = esw_cfg.discard_tagged;
884 npar->promisc_mode = esw_cfg.promisc_mode;
885 npar->offload_flags = esw_cfg.offload_flags;
886 }
887
888 return 0;
889}
890
4e8acb01
RB
891static int
892qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
893 struct qlcnic_npar_info *npar, int pci_func)
894{
895 struct qlcnic_esw_func_cfg esw_cfg;
896 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
897 esw_cfg.pci_func = pci_func;
898 esw_cfg.vlan_id = npar->pvid;
7373373d 899 esw_cfg.mac_override = npar->mac_override;
4e8acb01
RB
900 esw_cfg.discard_tagged = npar->discard_tagged;
901 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
902 esw_cfg.offload_flags = npar->offload_flags;
903 esw_cfg.promisc_mode = npar->promisc_mode;
904 if (qlcnic_config_switch_port(adapter, &esw_cfg))
905 return -EIO;
906
907 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
908 if (qlcnic_config_switch_port(adapter, &esw_cfg))
909 return -EIO;
910
911 return 0;
912}
913
cea8975e
AC
914static int
915qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
916{
4e8acb01 917 int i, err;
cea8975e
AC
918 struct qlcnic_npar_info *npar;
919 struct qlcnic_info nic_info;
920
174240a8 921 if (!adapter->need_fw_reset)
cea8975e
AC
922 return 0;
923
4e8acb01
RB
924 /* Set the NPAR config data after FW reset */
925 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
926 npar = &adapter->npars[i];
927 if (npar->type != QLCNIC_TYPE_NIC)
928 continue;
929 err = qlcnic_get_nic_info(adapter, &nic_info, i);
930 if (err)
931 return err;
932 nic_info.min_tx_bw = npar->min_bw;
933 nic_info.max_tx_bw = npar->max_bw;
934 err = qlcnic_set_nic_info(adapter, &nic_info);
935 if (err)
936 return err;
cea8975e 937
4e8acb01
RB
938 if (npar->enable_pm) {
939 err = qlcnic_config_port_mirroring(adapter,
940 npar->dest_npar, 1, i);
941 if (err)
942 return err;
cea8975e 943 }
4e8acb01
RB
944 err = qlcnic_reset_eswitch_config(adapter, npar, i);
945 if (err)
946 return err;
cea8975e 947 }
4e8acb01 948 return 0;
cea8975e
AC
949}
950
78f84e1a
AKS
951static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
952{
953 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
954 u32 npar_state;
955
956 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
957 return 0;
958
959 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
960 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
961 msleep(1000);
962 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
963 }
964 if (!npar_opt_timeo) {
965 dev_err(&adapter->pdev->dev,
			"Timed out waiting for NPAR state to become operational\n");
967 return -EIO;
968 }
969 return 0;
970}
971
174240a8
RB
972static int
973qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
974{
975 int err;
976
977 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
978 adapter->op_mode != QLCNIC_MGMT_FUNC)
979 return 0;
980
981 err = qlcnic_set_default_offload_settings(adapter);
982 if (err)
983 return err;
984
985 err = qlcnic_reset_npar_config(adapter);
986 if (err)
987 return err;
988
989 qlcnic_dev_set_npar_ready(adapter);
990
991 return err;
992}
993
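/*
 * Bring the firmware up: if this function is allowed to load firmware, load
 * it from flash or from a file (load_fw_file), reinitialize from the ROM if
 * a reset is needed, then wait for the firmware to come ready and mark the
 * device state QLCNIC_DEV_READY.
 */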
af19b491
AKS
994static int
995qlcnic_start_firmware(struct qlcnic_adapter *adapter)
996{
d4066833 997 int err;
af19b491 998
aa5e18c0
SC
999 err = qlcnic_can_start_firmware(adapter);
1000 if (err < 0)
1001 return err;
1002 else if (!err)
d4066833 1003 goto check_fw_status;
af19b491 1004
4d5bdb38
AKS
1005 if (load_fw_file)
1006 qlcnic_request_firmware(adapter);
8f891387 1007 else {
8cfdce08
SC
1008 err = qlcnic_check_flash_fw_ver(adapter);
1009 if (err)
8f891387 1010 goto err_out;
1011
4d5bdb38 1012 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 1013 }
af19b491
AKS
1014
1015 err = qlcnic_need_fw_reset(adapter);
af19b491 1016 if (err == 0)
4e70812b 1017 goto check_fw_status;
af19b491 1018
d4066833
SC
1019 err = qlcnic_pinit_from_rom(adapter);
1020 if (err)
1021 goto err_out;
af19b491
AKS
1022
1023 err = qlcnic_load_firmware(adapter);
1024 if (err)
1025 goto err_out;
1026
1027 qlcnic_release_firmware(adapter);
d4066833 1028 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 1029
d4066833
SC
1030check_fw_status:
1031 err = qlcnic_check_fw_status(adapter);
af19b491
AKS
1032 if (err)
1033 goto err_out;
1034
1035 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 1036 qlcnic_idc_debug_info(adapter, 1);
b18971d1 1037
0866d96d
AC
1038 err = qlcnic_check_eswitch_mode(adapter);
1039 if (err) {
1040 dev_err(&adapter->pdev->dev,
1041 "Memory allocation failed for eswitch\n");
1042 goto err_out;
1043 }
174240a8
RB
1044 err = qlcnic_set_mgmt_operations(adapter);
1045 if (err)
1046 goto err_out;
1047
1048 qlcnic_check_options(adapter);
af19b491
AKS
1049 adapter->need_fw_reset = 0;
1050
a7fc948f
AKS
1051 qlcnic_release_firmware(adapter);
1052 return 0;
af19b491
AKS
1053
1054err_out:
a7fc948f
AKS
1055 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1056 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
0866d96d 1057
af19b491
AKS
1058 qlcnic_release_firmware(adapter);
1059 return err;
1060}
1061
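/*
 * Register an interrupt handler for every SDS ring, choosing the handler
 * based on the interrupt mode (MSI-X, MSI or legacy INTx) and on whether the
 * device is in diagnostic (interrupt test) mode.
 */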
1062static int
1063qlcnic_request_irq(struct qlcnic_adapter *adapter)
1064{
1065 irq_handler_t handler;
1066 struct qlcnic_host_sds_ring *sds_ring;
1067 int err, ring;
1068
1069 unsigned long flags = 0;
1070 struct net_device *netdev = adapter->netdev;
b1fc6d3c 1071 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491 1072
7eb9855d
AKS
1073 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1074 handler = qlcnic_tmp_intr;
1075 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1076 flags |= IRQF_SHARED;
1077
1078 } else {
1079 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1080 handler = qlcnic_msix_intr;
1081 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1082 handler = qlcnic_msi_intr;
1083 else {
1084 flags |= IRQF_SHARED;
1085 handler = qlcnic_intr;
1086 }
af19b491
AKS
1087 }
1088 adapter->irq = netdev->irq;
1089
1090 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1091 sds_ring = &recv_ctx->sds_rings[ring];
1092 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1093 err = request_irq(sds_ring->irq, handler,
1094 flags, sds_ring->name, sds_ring);
1095 if (err)
1096 return err;
1097 }
1098
1099 return 0;
1100}
1101
1102static void
1103qlcnic_free_irq(struct qlcnic_adapter *adapter)
1104{
1105 int ring;
1106 struct qlcnic_host_sds_ring *sds_ring;
1107
b1fc6d3c 1108 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
af19b491
AKS
1109
1110 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1111 sds_ring = &recv_ctx->sds_rings[ring];
1112 free_irq(sds_ring->irq, sds_ring);
1113 }
1114}
1115
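/*
 * Bring the data path up: apply the eswitch port configuration, create the
 * firmware context, post receive buffers, program multicast/MTU/RSS and
 * interrupt coalescing, then enable NAPI and request a link event.
 */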
af19b491
AKS
1116static int
1117__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1118{
8a15ad1f
AKS
1119 int ring;
1120 struct qlcnic_host_rds_ring *rds_ring;
1121
af19b491
AKS
1122 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1123 return -EIO;
1124
8a15ad1f
AKS
1125 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1126 return 0;
0325d69b
RB
1127 if (qlcnic_set_eswitch_port_config(adapter))
1128 return -EIO;
8a15ad1f
AKS
1129
1130 if (qlcnic_fw_create_ctx(adapter))
1131 return -EIO;
1132
1133 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
b1fc6d3c
AC
1134 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1135 qlcnic_post_rx_buffers(adapter, rds_ring);
8a15ad1f
AKS
1136 }
1137
af19b491
AKS
1138 qlcnic_set_multi(netdev);
1139 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1140
b1fc6d3c 1141 adapter->ahw->linkup = 0;
af19b491
AKS
1142
1143 if (adapter->max_sds_rings > 1)
1144 qlcnic_config_rss(adapter, 1);
1145
1146 qlcnic_config_intr_coalesce(adapter);
1147
24763d80 1148 if (netdev->features & NETIF_F_LRO)
af19b491
AKS
1149 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1150
1151 qlcnic_napi_enable(adapter);
1152
1153 qlcnic_linkevent_request(adapter, 1);
1154
68bf1c68 1155 adapter->reset_context = 0;
af19b491
AKS
1156 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1157 return 0;
1158}
1159
/* Used during resume and during firmware recovery */
1161
1162static int
1163qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1164{
1165 int err = 0;
1166
1167 rtnl_lock();
1168 if (netif_running(netdev))
1169 err = __qlcnic_up(adapter, netdev);
1170 rtnl_unlock();
1171
1172 return err;
1173}
1174
1175static void
1176__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1177{
1178 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1179 return;
1180
1181 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1182 return;
1183
1184 smp_mb();
1185 spin_lock(&adapter->tx_clean_lock);
1186 netif_carrier_off(netdev);
1187 netif_tx_disable(netdev);
1188
1189 qlcnic_free_mac_list(adapter);
1190
b5e5492c
AKS
1191 if (adapter->fhash.fnum)
1192 qlcnic_delete_lb_filters(adapter);
1193
af19b491
AKS
1194 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1195
1196 qlcnic_napi_disable(adapter);
1197
8a15ad1f
AKS
1198 qlcnic_fw_destroy_ctx(adapter);
1199
1200 qlcnic_reset_rx_buffers_list(adapter);
af19b491
AKS
1201 qlcnic_release_tx_buffers(adapter);
1202 spin_unlock(&adapter->tx_clean_lock);
1203}
1204
/* Used during suspend and during firmware recovery */
1206
1207static void
1208qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1209{
1210 rtnl_lock();
1211 if (netif_running(netdev))
1212 __qlcnic_down(adapter, netdev);
1213 rtnl_unlock();
1214
1215}
1216
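/*
 * Allocate the software and hardware resources needed by the data path,
 * request the interrupts and create the sysfs entries. Undone by
 * qlcnic_detach().
 */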
1217static int
1218qlcnic_attach(struct qlcnic_adapter *adapter)
1219{
1220 struct net_device *netdev = adapter->netdev;
1221 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1222 int err;
af19b491
AKS
1223
1224 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1225 return 0;
1226
af19b491
AKS
1227 err = qlcnic_napi_add(adapter, netdev);
1228 if (err)
1229 return err;
1230
1231 err = qlcnic_alloc_sw_resources(adapter);
1232 if (err) {
1233 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1234 goto err_out_napi_del;
af19b491
AKS
1235 }
1236
1237 err = qlcnic_alloc_hw_resources(adapter);
1238 if (err) {
1239 dev_err(&pdev->dev, "Error in setting hw resources\n");
1240 goto err_out_free_sw;
1241 }
1242
af19b491
AKS
1243 err = qlcnic_request_irq(adapter);
1244 if (err) {
1245 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1246 goto err_out_free_hw;
af19b491
AKS
1247 }
1248
af19b491
AKS
1249 qlcnic_create_sysfs_entries(adapter);
1250
1251 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1252 return 0;
1253
8a15ad1f 1254err_out_free_hw:
af19b491
AKS
1255 qlcnic_free_hw_resources(adapter);
1256err_out_free_sw:
1257 qlcnic_free_sw_resources(adapter);
8a15ad1f
AKS
1258err_out_napi_del:
1259 qlcnic_napi_del(adapter);
af19b491
AKS
1260 return err;
1261}
1262
1263static void
1264qlcnic_detach(struct qlcnic_adapter *adapter)
1265{
1266 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1267 return;
1268
1269 qlcnic_remove_sysfs_entries(adapter);
1270
1271 qlcnic_free_hw_resources(adapter);
1272 qlcnic_release_rx_buffers(adapter);
1273 qlcnic_free_irq(adapter);
1274 qlcnic_napi_del(adapter);
1275 qlcnic_free_sw_resources(adapter);
1276
1277 adapter->is_up = 0;
1278}
1279
7eb9855d
AKS
1280void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1281{
1282 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1283 struct qlcnic_host_sds_ring *sds_ring;
1284 int ring;
1285
78ad3892 1286 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
cdaff185
AKS
1287 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1288 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
b1fc6d3c 1289 sds_ring = &adapter->recv_ctx->sds_rings[ring];
cdaff185
AKS
1290 qlcnic_disable_int(sds_ring);
1291 }
7eb9855d
AKS
1292 }
1293
8a15ad1f
AKS
1294 qlcnic_fw_destroy_ctx(adapter);
1295
7eb9855d
AKS
1296 qlcnic_detach(adapter);
1297
1298 adapter->diag_test = 0;
1299 adapter->max_sds_rings = max_sds_rings;
1300
1301 if (qlcnic_attach(adapter))
34ce3626 1302 goto out;
7eb9855d
AKS
1303
1304 if (netif_running(netdev))
1305 __qlcnic_up(adapter, netdev);
34ce3626 1306out:
7eb9855d
AKS
1307 netif_device_attach(netdev);
1308}
1309
b1fc6d3c
AC
1310static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
1311{
1312 int err = 0;
1313 adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
1314 GFP_KERNEL);
1315 if (!adapter->ahw) {
1316 dev_err(&adapter->pdev->dev,
			"Failed to allocate hardware context for adapter\n");
1318 err = -ENOMEM;
1319 goto err_out;
1320 }
1321 adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
1322 GFP_KERNEL);
1323 if (!adapter->recv_ctx) {
1324 dev_err(&adapter->pdev->dev,
1325 "Failed to allocate recv ctx resources for adapter\n");
1326 kfree(adapter->ahw);
1327 adapter->ahw = NULL;
1328 err = -ENOMEM;
8816d009 1329 goto err_out;
b1fc6d3c 1330 }
8816d009
AC
1331 /* Initialize interrupt coalesce parameters */
1332 adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
1333 adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1334 adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
b1fc6d3c
AC
1335err_out:
1336 return err;
1337}
1338
1339static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
1340{
1341 kfree(adapter->recv_ctx);
1342 adapter->recv_ctx = NULL;
1343
18f2f616
AC
1344 if (adapter->ahw->fw_dump.tmpl_hdr) {
1345 vfree(adapter->ahw->fw_dump.tmpl_hdr);
1346 adapter->ahw->fw_dump.tmpl_hdr = NULL;
1347 }
b1fc6d3c
AC
1348 kfree(adapter->ahw);
1349 adapter->ahw = NULL;
1350}
1351
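/*
 * Switch the device into diagnostic mode with a single SDS ring so that the
 * interrupt and loopback self-tests can run; qlcnic_diag_free_res() restores
 * normal operation.
 */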
7eb9855d
AKS
1352int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1353{
1354 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1355 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1356 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1357 int ring;
1358 int ret;
1359
1360 netif_device_detach(netdev);
1361
1362 if (netif_running(netdev))
1363 __qlcnic_down(adapter, netdev);
1364
1365 qlcnic_detach(adapter);
1366
1367 adapter->max_sds_rings = 1;
1368 adapter->diag_test = test;
1369
1370 ret = qlcnic_attach(adapter);
34ce3626
AKS
1371 if (ret) {
1372 netif_device_attach(netdev);
7eb9855d 1373 return ret;
34ce3626 1374 }
7eb9855d 1375
8a15ad1f
AKS
1376 ret = qlcnic_fw_create_ctx(adapter);
1377 if (ret) {
1378 qlcnic_detach(adapter);
57e46248 1379 netif_device_attach(netdev);
8a15ad1f
AKS
1380 return ret;
1381 }
1382
1383 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
b1fc6d3c
AC
1384 rds_ring = &adapter->recv_ctx->rds_rings[ring];
1385 qlcnic_post_rx_buffers(adapter, rds_ring);
8a15ad1f
AKS
1386 }
1387
cdaff185
AKS
1388 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1389 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
b1fc6d3c 1390 sds_ring = &adapter->recv_ctx->sds_rings[ring];
cdaff185
AKS
1391 qlcnic_enable_int(sds_ring);
1392 }
7eb9855d 1393 }
22c8c934
SC
1394
1395 if (adapter->diag_test == QLCNIC_LOOPBACK_TEST) {
1396 adapter->ahw->loopback_state = 0;
1397 qlcnic_linkevent_request(adapter, 1);
1398 }
1399
78ad3892 1400 set_bit(__QLCNIC_DEV_UP, &adapter->state);
7eb9855d
AKS
1401
1402 return 0;
1403}
1404
68bf1c68
AKS
1405/* Reset context in hardware only */
1406static int
1407qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1408{
1409 struct net_device *netdev = adapter->netdev;
1410
1411 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1412 return -EBUSY;
1413
1414 netif_device_detach(netdev);
1415
1416 qlcnic_down(adapter, netdev);
1417
1418 qlcnic_up(adapter, netdev);
1419
1420 netif_device_attach(netdev);
1421
1422 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1423 return 0;
1424}
1425
af19b491
AKS
1426int
1427qlcnic_reset_context(struct qlcnic_adapter *adapter)
1428{
1429 int err = 0;
1430 struct net_device *netdev = adapter->netdev;
1431
1432 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1433 return -EBUSY;
1434
1435 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1436
1437 netif_device_detach(netdev);
1438
1439 if (netif_running(netdev))
1440 __qlcnic_down(adapter, netdev);
1441
1442 qlcnic_detach(adapter);
1443
1444 if (netif_running(netdev)) {
1445 err = qlcnic_attach(adapter);
1446 if (!err)
34ce3626 1447 __qlcnic_up(adapter, netdev);
af19b491
AKS
1448 }
1449
1450 netif_device_attach(netdev);
1451 }
1452
af19b491
AKS
1453 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1454 return err;
1455}
1456
1457static int
1458qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1459 struct net_device *netdev, u8 pci_using_dac)
af19b491
AKS
1460{
1461 int err;
1462 struct pci_dev *pdev = adapter->pdev;
1463
af19b491
AKS
1464 adapter->mc_enabled = 0;
1465 adapter->max_mc_count = 38;
1466
1467 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1468 netdev->watchdog_timeo = 5*HZ;
af19b491
AKS
1469
1470 qlcnic_change_mtu(netdev, netdev->mtu);
1471
1472 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1473
135d84a9
MM
1474 netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
1475 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
ac8d0c4f 1476
135d84a9
MM
1477 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
1478 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1479 if (pci_using_dac)
1480 netdev->hw_features |= NETIF_F_HIGHDMA;
af19b491 1481
135d84a9 1482 netdev->vlan_features = netdev->hw_features;
af19b491
AKS
1483
1484 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
135d84a9 1485 netdev->hw_features |= NETIF_F_HW_VLAN_TX;
af19b491 1486 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
135d84a9
MM
1487 netdev->hw_features |= NETIF_F_LRO;
1488
1489 netdev->features |= netdev->hw_features |
1490 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
1491
af19b491
AKS
1492 netdev->irq = adapter->msix_entries[0].vector;
1493
af19b491
AKS
1494 err = register_netdev(netdev);
1495 if (err) {
1496 dev_err(&pdev->dev, "failed to register net device\n");
1497 return err;
1498 }
1499
1500 return 0;
1501}
1502
1bb09fb9
AKS
1503static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1504{
1505 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1506 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1507 *pci_using_dac = 1;
1508 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1509 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1510 *pci_using_dac = 0;
1511 else {
1512 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1513 return -EIO;
1514 }
1515
1516 return 0;
1517}
1518
f94bc1e7
SC
1519static int
1520qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
1521{
1522 adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
1523 GFP_KERNEL);
1524
1525 if (adapter->msix_entries)
1526 return 0;
1527
1528 dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
1529 return -ENOMEM;
1530}
1531
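/*
 * PCI probe entry point: map BAR 0, detect the function's operating mode,
 * start the firmware, set up interrupts and register the net_device.
 */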
af19b491
AKS
1532static int __devinit
1533qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1534{
1535 struct net_device *netdev = NULL;
1536 struct qlcnic_adapter *adapter = NULL;
1537 int err;
af19b491 1538 uint8_t revision_id;
1bb09fb9 1539 uint8_t pci_using_dac;
da48e6c3 1540 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
af19b491
AKS
1541
1542 err = pci_enable_device(pdev);
1543 if (err)
1544 return err;
1545
1546 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1547 err = -ENODEV;
1548 goto err_out_disable_pdev;
1549 }
1550
1bb09fb9
AKS
1551 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1552 if (err)
1553 goto err_out_disable_pdev;
1554
af19b491
AKS
1555 err = pci_request_regions(pdev, qlcnic_driver_name);
1556 if (err)
1557 goto err_out_disable_pdev;
1558
1559 pci_set_master(pdev);
451724c8 1560 pci_enable_pcie_error_reporting(pdev);
af19b491
AKS
1561
1562 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1563 if (!netdev) {
1564 dev_err(&pdev->dev, "failed to allocate net_device\n");
1565 err = -ENOMEM;
1566 goto err_out_free_res;
1567 }
1568
1569 SET_NETDEV_DEV(netdev, &pdev->dev);
1570
1571 adapter = netdev_priv(netdev);
1572 adapter->netdev = netdev;
1573 adapter->pdev = pdev;
af19b491 1574
b1fc6d3c
AC
1575 if (qlcnic_alloc_adapter_resources(adapter))
1576 goto err_out_free_netdev;
1577
1578 adapter->dev_rst_time = jiffies;
af19b491 1579 revision_id = pdev->revision;
b1fc6d3c 1580 adapter->ahw->revision_id = revision_id;
af19b491 1581
b1fc6d3c
AC
1582 rwlock_init(&adapter->ahw->crb_lock);
1583 mutex_init(&adapter->ahw->mem_lock);
af19b491
AKS
1584
1585 spin_lock_init(&adapter->tx_clean_lock);
1586 INIT_LIST_HEAD(&adapter->mac_list);
1587
1588 err = qlcnic_setup_pci_map(adapter);
1589 if (err)
b1fc6d3c 1590 goto err_out_free_hw;
af19b491
AKS
1591
1592 /* This will be reset for mezz cards */
b1fc6d3c 1593 adapter->portnum = adapter->ahw->pci_func;
af19b491
AKS
1594
1595 err = qlcnic_get_board_info(adapter);
1596 if (err) {
1597 dev_err(&pdev->dev, "Error getting board config info.\n");
1598 goto err_out_iounmap;
1599 }
1600
8cfdce08
SC
1601 err = qlcnic_setup_idc_param(adapter);
1602 if (err)
b3a24649 1603 goto err_out_iounmap;
af19b491 1604
1dc0f3c5 1605 adapter->flags |= QLCNIC_NEED_FLR;
b0044bcf 1606
9f26f547 1607 err = adapter->nic_ops->start_firmware(adapter);
a7fc948f
AKS
1608 if (err) {
		dev_err(&pdev->dev, "Loading firmware failed. Please reboot\n");
af19b491 1610 goto err_out_decr_ref;
a7fc948f 1611 }
af19b491 1612
602ca6f0
SV
1613 /* Get FW dump template and store it */
1614 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
1615 if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
1616 dev_info(&pdev->dev,
1617 "Supports FW dump capability\n");
1618
da48e6c3
RB
1619 if (qlcnic_read_mac_addr(adapter))
1620 dev_warn(&pdev->dev, "failed to read mac addr\n");
1621
1622 if (adapter->portnum == 0) {
1623 get_brd_name(adapter, brd_name);
1624
1625 pr_info("%s: %s Board Chip rev 0x%x\n",
1626 module_name(THIS_MODULE),
b1fc6d3c 1627 brd_name, adapter->ahw->revision_id);
da48e6c3
RB
1628 }
1629
af19b491
AKS
1630 qlcnic_clear_stats(adapter);
1631
f94bc1e7
SC
1632 err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
1633 if (err)
1634 goto err_out_decr_ref;
1635
af19b491
AKS
1636 qlcnic_setup_intr(adapter);
1637
1bb09fb9 1638 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
af19b491
AKS
1639 if (err)
1640 goto err_out_disable_msi;
1641
1642 pci_set_drvdata(pdev, adapter);
1643
1644 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1645
b1fc6d3c 1646 switch (adapter->ahw->port_type) {
af19b491
AKS
1647 case QLCNIC_GBE:
1648 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1649 adapter->netdev->name);
1650 break;
1651 case QLCNIC_XGBE:
1652 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1653 adapter->netdev->name);
1654 break;
1655 }
1656
b5e5492c 1657 qlcnic_alloc_lb_filters_mem(adapter);
af19b491
AKS
1658 qlcnic_create_diag_entries(adapter);
1659
1660 return 0;
1661
1662err_out_disable_msi:
1663 qlcnic_teardown_intr(adapter);
f94bc1e7 1664 kfree(adapter->msix_entries);
af19b491
AKS
1665
1666err_out_decr_ref:
21854f02 1667 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1668
1669err_out_iounmap:
1670 qlcnic_cleanup_pci_map(adapter);
1671
b1fc6d3c
AC
1672err_out_free_hw:
1673 qlcnic_free_adapter_resources(adapter);
1674
af19b491
AKS
1675err_out_free_netdev:
1676 free_netdev(netdev);
1677
1678err_out_free_res:
1679 pci_release_regions(pdev);
1680
1681err_out_disable_pdev:
1682 pci_set_drvdata(pdev, NULL);
1683 pci_disable_device(pdev);
1684 return err;
1685}
1686
1687static void __devexit qlcnic_remove(struct pci_dev *pdev)
1688{
1689 struct qlcnic_adapter *adapter;
1690 struct net_device *netdev;
1691
1692 adapter = pci_get_drvdata(pdev);
1693 if (adapter == NULL)
1694 return;
1695
1696 netdev = adapter->netdev;
1697
1698 qlcnic_cancel_fw_work(adapter);
1699
1700 unregister_netdev(netdev);
1701
af19b491
AKS
1702 qlcnic_detach(adapter);
1703
2e9d722d
AC
1704 if (adapter->npars != NULL)
1705 kfree(adapter->npars);
1706 if (adapter->eswitch != NULL)
1707 kfree(adapter->eswitch);
1708
21854f02 1709 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1710
1711 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1712
b5e5492c
AKS
1713 qlcnic_free_lb_filters_mem(adapter);
1714
af19b491 1715 qlcnic_teardown_intr(adapter);
f94bc1e7 1716 kfree(adapter->msix_entries);
af19b491
AKS
1717
1718 qlcnic_remove_diag_entries(adapter);
1719
1720 qlcnic_cleanup_pci_map(adapter);
1721
1722 qlcnic_release_firmware(adapter);
1723
451724c8 1724 pci_disable_pcie_error_reporting(pdev);
af19b491
AKS
1725 pci_release_regions(pdev);
1726 pci_disable_device(pdev);
1727 pci_set_drvdata(pdev, NULL);
1728
b1fc6d3c 1729 qlcnic_free_adapter_resources(adapter);
af19b491
AKS
1730 free_netdev(netdev);
1731}
1732static int __qlcnic_shutdown(struct pci_dev *pdev)
1733{
1734 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1735 struct net_device *netdev = adapter->netdev;
1736 int retval;
1737
1738 netif_device_detach(netdev);
1739
1740 qlcnic_cancel_fw_work(adapter);
1741
1742 if (netif_running(netdev))
1743 qlcnic_down(adapter, netdev);
1744
21854f02 1745 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1746
1747 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1748
1749 retval = pci_save_state(pdev);
1750 if (retval)
1751 return retval;
1752
1753 if (qlcnic_wol_supported(adapter)) {
1754 pci_enable_wake(pdev, PCI_D3cold, 1);
1755 pci_enable_wake(pdev, PCI_D3hot, 1);
1756 }
1757
1758 return 0;
1759}
1760
1761static void qlcnic_shutdown(struct pci_dev *pdev)
1762{
1763 if (__qlcnic_shutdown(pdev))
1764 return;
1765
1766 pci_disable_device(pdev);
1767}
1768
1769#ifdef CONFIG_PM
1770static int
1771qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1772{
1773 int retval;
1774
1775 retval = __qlcnic_shutdown(pdev);
1776 if (retval)
1777 return retval;
1778
1779 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1780 return 0;
1781}
1782
1783static int
1784qlcnic_resume(struct pci_dev *pdev)
1785{
1786 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1787 struct net_device *netdev = adapter->netdev;
1788 int err;
1789
1790 err = pci_enable_device(pdev);
1791 if (err)
1792 return err;
1793
1794 pci_set_power_state(pdev, PCI_D0);
1795 pci_set_master(pdev);
1796 pci_restore_state(pdev);
1797
9f26f547 1798 err = adapter->nic_ops->start_firmware(adapter);
af19b491
AKS
1799 if (err) {
1800 dev_err(&pdev->dev, "failed to start firmware\n");
1801 return err;
1802 }
1803
1804 if (netif_running(netdev)) {
af19b491
AKS
1805 err = qlcnic_up(adapter, netdev);
1806 if (err)
52486a3a 1807 goto done;
af19b491 1808
aec1e845 1809 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491 1810 }
52486a3a 1811done:
af19b491
AKS
1812 netif_device_attach(netdev);
1813 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1814 return 0;
af19b491
AKS
1815}
1816#endif
1817
1818static int qlcnic_open(struct net_device *netdev)
1819{
1820 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1821 int err;
1822
c55ad8e5
AKS
1823 netif_carrier_off(netdev);
1824
af19b491
AKS
1825 err = qlcnic_attach(adapter);
1826 if (err)
1827 return err;
1828
1829 err = __qlcnic_up(adapter, netdev);
1830 if (err)
1831 goto err_out;
1832
1833 netif_start_queue(netdev);
1834
1835 return 0;
1836
1837err_out:
1838 qlcnic_detach(adapter);
1839 return err;
1840}
1841
1842/*
1843 * qlcnic_close - Disables a network interface entry point
1844 */
1845static int qlcnic_close(struct net_device *netdev)
1846{
1847 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1848
1849 __qlcnic_down(adapter, netdev);
1850 return 0;
1851}
1852
b5e5492c
AKS
1853static void
1854qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1855{
1856 void *head;
1857 int i;
1858
1859 if (!qlcnic_mac_learn)
1860 return;
1861
1862 spin_lock_init(&adapter->mac_learn_lock);
1863
1864 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1865 GFP_KERNEL);
1866 if (!head)
1867 return;
1868
1869 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
43d620c8 1870 adapter->fhash.fhead = head;
b5e5492c
AKS
1871
1872 for (i = 0; i < adapter->fhash.fmax; i++)
1873 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1874}
1875
1876static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1877{
1878 if (adapter->fhash.fmax && adapter->fhash.fhead)
1879 kfree(adapter->fhash.fhead);
1880
1881 adapter->fhash.fhead = NULL;
1882 adapter->fhash.fmax = 0;
1883}
1884
1885static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1886 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1887{
1888 struct cmd_desc_type0 *hwdesc;
1889 struct qlcnic_nic_req *req;
1890 struct qlcnic_mac_req *mac_req;
7e56cac4 1891 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1892 u32 producer;
1893 u64 word;
1894
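/*
 * Post a QLCNIC_REQUEST descriptor on the Tx ring asking the firmware to
 * add this source MAC (optionally qualified by a VLAN id) to its filter
 * table for this port.
 */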
1895 producer = tx_ring->producer;
1896 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1897
1898 req = (struct qlcnic_nic_req *)hwdesc;
1899 memset(req, 0, sizeof(struct qlcnic_nic_req));
1900 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1901
1902 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1903 req->req_hdr = cpu_to_le64(word);
1904
1905 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1906 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1907 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1908
7e56cac4
SC
1909 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1910 vlan_req->vlan_id = vlan_id;
03c5d770 1911
b5e5492c 1912 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
036d61f0 1913 smp_mb();
b5e5492c
AKS
1914}
1915
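/*
 * Fold bits 16-18 and 40-42 of the source MAC into a 6-bit value;
 * qlcnic_send_filter() masks it with (QLCNIC_LB_MAX_FILTERS - 1) to pick
 * a bucket in the fhash table.
 */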
1916#define QLCNIC_MAC_HASH(MAC)\
1917 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1918
1919static void
1920qlcnic_send_filter(struct qlcnic_adapter *adapter,
1921 struct qlcnic_host_tx_ring *tx_ring,
1922 struct cmd_desc_type0 *first_desc,
1923 struct sk_buff *skb)
1924{
1925 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1926 struct qlcnic_filter *fil, *tmp_fil;
1927 struct hlist_node *tmp_hnode, *n;
1928 struct hlist_head *head;
1929 u64 src_addr = 0;
7e56cac4 1930 __le16 vlan_id = 0;
b5e5492c
AKS
1931 u8 hindex;
1932
1933 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1934 return;
1935
1936 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1937 return;
1938
03c5d770
AKS
1939 /* Only NPAR-capable devices support VLAN-based learning */

1940 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1941 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1942 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1943 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1944 head = &(adapter->fhash.fhead[hindex]);
1945
1946 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1947 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1948 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1949
1950 if (jiffies >
1951 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1952 qlcnic_change_filter(adapter, src_addr, vlan_id,
1953 tx_ring);
b5e5492c
AKS
1954 tmp_fil->ftime = jiffies;
1955 return;
1956 }
1957 }
1958
1959 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1960 if (!fil)
1961 return;
1962
03c5d770 1963 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1964
1965 fil->ftime = jiffies;
03c5d770 1966 fil->vlan_id = vlan_id;
b5e5492c
AKS
1967 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1968 spin_lock(&adapter->mac_learn_lock);
1969 hlist_add_head(&(fil->fnode), head);
1970 adapter->fhash.fnum++;
1971 spin_unlock(&adapter->mac_learn_lock);
1972}
1973
036d61f0
AC
1974static int
1975qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
af19b491
AKS
1976 struct cmd_desc_type0 *first_desc,
1977 struct sk_buff *skb)
1978{
036d61f0
AC
1979 u8 opcode = 0, hdr_len = 0;
1980 u16 flags = 0, vlan_tci = 0;
1981 int copied, offset, copy_len;
af19b491
AKS
1982 struct cmd_desc_type0 *hwdesc;
1983 struct vlan_ethhdr *vh;
036d61f0
AC
1984 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1985 u16 protocol = ntohs(skb->protocol);
2e9d722d 1986 u32 producer = tx_ring->producer;
036d61f0
AC
1987
1988 if (protocol == ETH_P_8021Q) {
1989 vh = (struct vlan_ethhdr *)skb->data;
1990 flags = FLAGS_VLAN_TAGGED;
1991 vlan_tci = vh->h_vlan_TCI;
1992 } else if (vlan_tx_tag_present(skb)) {
1993 flags = FLAGS_VLAN_OOB;
1994 vlan_tci = vlan_tx_tag_get(skb);
1995 }
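/*
 * With a port VLAN (pvid) configured, already-tagged frames are only
 * accepted when tagging is enabled for this function; untagged frames
 * get the pvid inserted out-of-band.
 */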
1996 if (unlikely(adapter->pvid)) {
1997 if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1998 return -EIO;
1999 if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
2000 goto set_flags;
2001
2002 flags = FLAGS_VLAN_OOB;
2003 vlan_tci = adapter->pvid;
2004 }
2005set_flags:
2006 qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
2007 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
af19b491 2008
2e9d722d
AC
2009 if (*(skb->data) & BIT_0) {
2010 flags |= BIT_0;
2011 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
2012 }
036d61f0
AC
2013 opcode = TX_ETHER_PKT;
2014 if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
af19b491
AKS
2015 skb_shinfo(skb)->gso_size > 0) {
2016
2017 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2018
2019 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2020 first_desc->total_hdr_length = hdr_len;
036d61f0
AC
2021
2022 opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
2023
2024 /* For LSO, we need to copy the MAC/IP/TCP headers into
2025 * the descriptor ring */
2026 copied = 0;
2027 offset = 2;
2028
2029 if (flags & FLAGS_VLAN_OOB) {
af19b491
AKS
2030 first_desc->total_hdr_length += VLAN_HLEN;
2031 first_desc->tcp_hdr_offset = VLAN_HLEN;
2032 first_desc->ip_hdr_offset = VLAN_HLEN;
2033 /* Only in the case of TSO on a VLAN device */
2034 flags |= FLAGS_VLAN_TAGGED;
036d61f0
AC
2035
2036 /* Create a TSO vlan header template for firmware */
2037
2038 hwdesc = &tx_ring->desc_head[producer];
2039 tx_ring->cmd_buf_arr[producer].skb = NULL;
2040
2041 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2042 offset, hdr_len + VLAN_HLEN);
2043
2044 vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
2045 skb_copy_from_linear_data(skb, vh, 12);
2046 vh->h_vlan_proto = htons(ETH_P_8021Q);
2047 vh->h_vlan_TCI = htons(vlan_tci);
2048
2049 skb_copy_from_linear_data_offset(skb, 12,
2050 (char *)vh + 16, copy_len - 16);
2051
2052 copied = copy_len - VLAN_HLEN;
2053 offset = 0;
2054
2055 producer = get_next_index(producer, tx_ring->num_desc);
af19b491
AKS
2056 }
2057
036d61f0
AC
2058 while (copied < hdr_len) {
2059
2060 copy_len = min((int)sizeof(struct cmd_desc_type0) -
2061 offset, (hdr_len - copied));
2062
2063 hwdesc = &tx_ring->desc_head[producer];
2064 tx_ring->cmd_buf_arr[producer].skb = NULL;
2065
2066 skb_copy_from_linear_data_offset(skb, copied,
2067 (char *) hwdesc + offset, copy_len);
2068
2069 copied += copy_len;
2070 offset = 0;
2071
2072 producer = get_next_index(producer, tx_ring->num_desc);
2073 }
2074
2075 tx_ring->producer = producer;
2076 smp_mb();
2077 adapter->stats.lso_frames++;
af19b491
AKS
2078
2079 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2080 u8 l4proto;
2081
036d61f0 2082 if (protocol == ETH_P_IP) {
af19b491
AKS
2083 l4proto = ip_hdr(skb)->protocol;
2084
2085 if (l4proto == IPPROTO_TCP)
2086 opcode = TX_TCP_PKT;
2087 else if (l4proto == IPPROTO_UDP)
2088 opcode = TX_UDP_PKT;
036d61f0 2089 } else if (protocol == ETH_P_IPV6) {
af19b491
AKS
2090 l4proto = ipv6_hdr(skb)->nexthdr;
2091
2092 if (l4proto == IPPROTO_TCP)
2093 opcode = TX_TCPV6_PKT;
2094 else if (l4proto == IPPROTO_UDP)
2095 opcode = TX_UDPV6_PKT;
2096 }
2097 }
af19b491
AKS
2098 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
2099 first_desc->ip_hdr_offset += skb_network_offset(skb);
2100 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2101
036d61f0 2102 return 0;
af19b491
AKS
2103}
2104
2105static int
2106qlcnic_map_tx_skb(struct pci_dev *pdev,
2107 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2108{
2109 struct qlcnic_skb_frag *nf;
2110 struct skb_frag_struct *frag;
2111 int i, nr_frags;
2112 dma_addr_t map;
2113
2114 nr_frags = skb_shinfo(skb)->nr_frags;
2115 nf = &pbuf->frag_array[0];
2116
2117 map = pci_map_single(pdev, skb->data,
2118 skb_headlen(skb), PCI_DMA_TODEVICE);
2119 if (pci_dma_mapping_error(pdev, map))
2120 goto out_err;
2121
2122 nf->dma = map;
2123 nf->length = skb_headlen(skb);
2124
2125 for (i = 0; i < nr_frags; i++) {
2126 frag = &skb_shinfo(skb)->frags[i];
2127 nf = &pbuf->frag_array[i+1];
2128
2129 map = pci_map_page(pdev, frag->page, frag->page_offset,
2130 frag->size, PCI_DMA_TODEVICE);
2131 if (pci_dma_mapping_error(pdev, map))
2132 goto unwind;
2133
2134 nf->dma = map;
2135 nf->length = frag->size;
2136 }
2137
2138 return 0;
2139
2140unwind:
2141 while (--i >= 0) {
2142 nf = &pbuf->frag_array[i+1];
2143 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2144 }
2145
2146 nf = &pbuf->frag_array[0];
2147 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2148
2149out_err:
2150 return -ENOMEM;
2151}
2152
036d61f0
AC
2153static void
2154qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2155 struct qlcnic_cmd_buffer *pbuf)
8cf61f89 2156{
036d61f0
AC
2157 struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
2158 int nr_frags = skb_shinfo(skb)->nr_frags;
2159 int i;
8cf61f89 2160
036d61f0
AC
2161 for (i = 0; i < nr_frags; i++) {
2162 nf = &pbuf->frag_array[i+1];
2163 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
8cf61f89 2164 }
8cf61f89 2165
036d61f0
AC
2166 nf = &pbuf->frag_array[0];
2167 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
5b446c6a 2168 pbuf->skb = NULL;
8cf61f89
AKS
2169}
2170
af19b491
AKS
2171static inline void
2172qlcnic_clear_cmddesc(u64 *desc)
2173{
2174 desc[0] = 0ULL;
2175 desc[2] = 0ULL;
8cf61f89 2176 desc[7] = 0ULL;
af19b491
AKS
2177}
2178
cdaff185 2179netdev_tx_t
af19b491
AKS
2180qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2181{
2182 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2183 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2184 struct qlcnic_cmd_buffer *pbuf;
2185 struct qlcnic_skb_frag *buffrag;
2186 struct cmd_desc_type0 *hwdesc, *first_desc;
2187 struct pci_dev *pdev;
dcb50aff 2188 struct ethhdr *phdr;
91a403ca 2189 int delta = 0;
af19b491
AKS
2190 int i, k;
2191
2192 u32 producer;
036d61f0 2193 int frag_count;
af19b491
AKS
2194 u32 num_txd = tx_ring->num_desc;
2195
780ab790
AKS
2196 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2197 netif_stop_queue(netdev);
2198 return NETDEV_TX_BUSY;
2199 }
2200
fe4d434d 2201 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2202 phdr = (struct ethhdr *)skb->data;
2203 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2204 adapter->mac_addr))
2205 goto drop_packet;
2206 }
2207
af19b491 2208 frag_count = skb_shinfo(skb)->nr_frags + 1;
91a403ca
AKS
2209 /* 14 frags supported for normal packet and
2210 * 32 frags supported for TSO packet
2211 */
2212 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
2213
2214 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
2215 delta += skb_shinfo(skb)->frags[i].size;
2216
2217 if (!__pskb_pull_tail(skb, delta))
2218 goto drop_packet;
2219
2220 frag_count = 1 + skb_shinfo(skb)->nr_frags;
2221 }
af19b491 2222
ef71ff83 2223 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2224 netif_stop_queue(netdev);
ef71ff83
RB
2225 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2226 netif_start_queue(netdev);
2227 else {
2228 adapter->stats.xmit_off++;
2229 return NETDEV_TX_BUSY;
2230 }
af19b491
AKS
2231 }
2232
2233 producer = tx_ring->producer;
2234 pbuf = &tx_ring->cmd_buf_arr[producer];
2235
2236 pdev = adapter->pdev;
2237
8cf61f89
AKS
2238 first_desc = hwdesc = &tx_ring->desc_head[producer];
2239 qlcnic_clear_cmddesc((u64 *)hwdesc);
2240
8ae6df97
AKS
2241 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2242 adapter->stats.tx_dma_map_error++;
af19b491 2243 goto drop_packet;
8ae6df97 2244 }
af19b491
AKS
2245
2246 pbuf->skb = skb;
2247 pbuf->frag_count = frag_count;
2248
af19b491
AKS
2249 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2250 qlcnic_set_tx_port(first_desc, adapter->portnum);
2251
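/*
 * Each command descriptor holds up to four buffer address/length pairs
 * (addr_buffer1..4); advance to a fresh descriptor every time the pair
 * index k wraps back to 0.
 */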
2252 for (i = 0; i < frag_count; i++) {
2253
2254 k = i % 4;
2255
2256 if ((k == 0) && (i > 0)) {
2257 /* move to next desc.*/
2258 producer = get_next_index(producer, num_txd);
2259 hwdesc = &tx_ring->desc_head[producer];
2260 qlcnic_clear_cmddesc((u64 *)hwdesc);
2261 tx_ring->cmd_buf_arr[producer].skb = NULL;
2262 }
2263
2264 buffrag = &pbuf->frag_array[i];
2265
2266 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2267 switch (k) {
2268 case 0:
2269 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2270 break;
2271 case 1:
2272 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2273 break;
2274 case 2:
2275 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2276 break;
2277 case 3:
2278 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2279 break;
2280 }
2281 }
2282
2283 tx_ring->producer = get_next_index(producer, num_txd);
036d61f0 2284 smp_mb();
af19b491 2285
036d61f0
AC
2286 if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
2287 goto unwind_buff;
af19b491 2288
b5e5492c
AKS
2289 if (qlcnic_mac_learn)
2290 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2291
af19b491
AKS
2292 qlcnic_update_cmd_producer(adapter, tx_ring);
2293
2294 adapter->stats.txbytes += skb->len;
2295 adapter->stats.xmitcalled++;
2296
2297 return NETDEV_TX_OK;
2298
036d61f0
AC
2299unwind_buff:
2300 qlcnic_unmap_buffers(pdev, skb, pbuf);
af19b491
AKS
2301drop_packet:
2302 adapter->stats.txdropped++;
2303 dev_kfree_skb_any(skb);
2304 return NETDEV_TX_OK;
2305}
2306
2307static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2308{
2309 struct net_device *netdev = adapter->netdev;
2310 u32 temp, temp_state, temp_val;
2311 int rv = 0;
2312
2313 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2314
2315 temp_state = qlcnic_get_temp_state(temp);
2316 temp_val = qlcnic_get_temp_val(temp);
2317
2318 if (temp_state == QLCNIC_TEMP_PANIC) {
2319 dev_err(&netdev->dev,
2320 "Device temperature %d degrees C exceeds"
2321 " maximum allowed. Hardware has been shut down.\n",
2322 temp_val);
2323 rv = 1;
2324 } else if (temp_state == QLCNIC_TEMP_WARN) {
2325 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2326 dev_err(&netdev->dev,
2327 "Device temperature %d degrees C "
2328 "exceeds operating range."
2329 " Immediate action needed.\n",
2330 temp_val);
2331 }
2332 } else {
2333 if (adapter->temp == QLCNIC_TEMP_WARN) {
2334 dev_info(&netdev->dev,
2335 "Device temperature is now %d degrees C"
2336 " in normal range.\n", temp_val);
2337 }
2338 }
2339 adapter->temp = temp_state;
2340 return rv;
2341}
2342
2343void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2344{
2345 struct net_device *netdev = adapter->netdev;
2346
b1fc6d3c 2347 if (adapter->ahw->linkup && !linkup) {
69324275 2348 netdev_info(netdev, "NIC Link is down\n");
b1fc6d3c 2349 adapter->ahw->linkup = 0;
af19b491
AKS
2350 if (netif_running(netdev)) {
2351 netif_carrier_off(netdev);
2352 netif_stop_queue(netdev);
2353 }
b1fc6d3c 2354 } else if (!adapter->ahw->linkup && linkup) {
69324275 2355 netdev_info(netdev, "NIC Link is up\n");
b1fc6d3c 2356 adapter->ahw->linkup = 1;
af19b491
AKS
2357 if (netif_running(netdev)) {
2358 netif_carrier_on(netdev);
2359 netif_wake_queue(netdev);
2360 }
2361 }
2362}
2363
2364static void qlcnic_tx_timeout(struct net_device *netdev)
2365{
2366 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2367
2368 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2369 return;
2370
2371 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2372
2373 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2374 adapter->need_fw_reset = 1;
2375 else
2376 adapter->reset_context = 1;
af19b491
AKS
2377}
2378
2379static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2380{
2381 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2382 struct net_device_stats *stats = &netdev->stats;
2383
af19b491
AKS
2384 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2385 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2386 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2387 stats->tx_bytes = adapter->stats.txbytes;
2388 stats->rx_dropped = adapter->stats.rxdropped;
2389 stats->tx_dropped = adapter->stats.txdropped;
2390
2391 return stats;
2392}
2393
7eb9855d 2394static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2395{
af19b491
AKS
2396 u32 status;
2397
2398 status = readl(adapter->isr_int_vec);
2399
2400 if (!(status & adapter->int_vec_bit))
2401 return IRQ_NONE;
2402
2403 /* check interrupt state machine, to be sure */
2404 status = readl(adapter->crb_int_state_reg);
2405 if (!ISR_LEGACY_INT_TRIGGERED(status))
2406 return IRQ_NONE;
2407
2408 writel(0xffffffff, adapter->tgt_status_reg);
2409 /* read twice to ensure write is flushed */
2410 readl(adapter->isr_int_vec);
2411 readl(adapter->isr_int_vec);
2412
7eb9855d
AKS
2413 return IRQ_HANDLED;
2414}
2415
2416static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2417{
2418 struct qlcnic_host_sds_ring *sds_ring = data;
2419 struct qlcnic_adapter *adapter = sds_ring->adapter;
2420
2421 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2422 goto done;
2423 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2424 writel(0xffffffff, adapter->tgt_status_reg);
2425 goto done;
2426 }
2427
2428 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2429 return IRQ_NONE;
2430
2431done:
2432 adapter->diag_cnt++;
2433 qlcnic_enable_int(sds_ring);
2434 return IRQ_HANDLED;
2435}
2436
2437static irqreturn_t qlcnic_intr(int irq, void *data)
2438{
2439 struct qlcnic_host_sds_ring *sds_ring = data;
2440 struct qlcnic_adapter *adapter = sds_ring->adapter;
2441
2442 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2443 return IRQ_NONE;
2444
af19b491
AKS
2445 napi_schedule(&sds_ring->napi);
2446
2447 return IRQ_HANDLED;
2448}
2449
2450static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2451{
2452 struct qlcnic_host_sds_ring *sds_ring = data;
2453 struct qlcnic_adapter *adapter = sds_ring->adapter;
2454
2455 /* clear interrupt */
2456 writel(0xffffffff, adapter->tgt_status_reg);
2457
2458 napi_schedule(&sds_ring->napi);
2459 return IRQ_HANDLED;
2460}
2461
2462static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2463{
2464 struct qlcnic_host_sds_ring *sds_ring = data;
2465
2466 napi_schedule(&sds_ring->napi);
2467 return IRQ_HANDLED;
2468}
2469
2470static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2471{
2472 u32 sw_consumer, hw_consumer;
2473 int count = 0, i;
2474 struct qlcnic_cmd_buffer *buffer;
2475 struct pci_dev *pdev = adapter->pdev;
2476 struct net_device *netdev = adapter->netdev;
2477 struct qlcnic_skb_frag *frag;
2478 int done;
2479 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2480
2481 if (!spin_trylock(&adapter->tx_clean_lock))
2482 return 1;
2483
2484 sw_consumer = tx_ring->sw_consumer;
2485 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2486
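/*
 * Everything between the software consumer and the hardware consumer
 * index has been transmitted: unmap the DMA buffers and free the skbs,
 * handling at most MAX_STATUS_HANDLE entries per pass.
 */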
2487 while (sw_consumer != hw_consumer) {
2488 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2489 if (buffer->skb) {
2490 frag = &buffer->frag_array[0];
2491 pci_unmap_single(pdev, frag->dma, frag->length,
2492 PCI_DMA_TODEVICE);
2493 frag->dma = 0ULL;
2494 for (i = 1; i < buffer->frag_count; i++) {
2495 frag++;
2496 pci_unmap_page(pdev, frag->dma, frag->length,
2497 PCI_DMA_TODEVICE);
2498 frag->dma = 0ULL;
2499 }
2500
2501 adapter->stats.xmitfinished++;
2502 dev_kfree_skb_any(buffer->skb);
2503 buffer->skb = NULL;
2504 }
2505
2506 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2507 if (++count >= MAX_STATUS_HANDLE)
2508 break;
2509 }
2510
2511 if (count && netif_running(netdev)) {
2512 tx_ring->sw_consumer = sw_consumer;
2513
2514 smp_mb();
2515
2516 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2517 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2518 netif_wake_queue(netdev);
8bfe8b91 2519 adapter->stats.xmit_on++;
af19b491 2520 }
af19b491 2521 }
ef71ff83 2522 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2523 }
2524 /*
2525 * If everything is freed up to the consumer then check if the ring is full.
2526 * If the ring is full then check if more needs to be freed and
2527 * schedule the callback again.
2528 *
2529 * This happens when there are 2 CPUs. One could be freeing and the
2530 * other filling it. If the ring is full when we get out of here and
2531 * the card has already interrupted the host then the host can miss the
2532 * interrupt.
2533 *
2534 * There is still a possible race condition and the host could miss an
2535 * interrupt. The card has to take care of this.
2536 */
2537 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2538 done = (sw_consumer == hw_consumer);
2539 spin_unlock(&adapter->tx_clean_lock);
2540
2541 return done;
2542}
2543
2544static int qlcnic_poll(struct napi_struct *napi, int budget)
2545{
2546 struct qlcnic_host_sds_ring *sds_ring =
2547 container_of(napi, struct qlcnic_host_sds_ring, napi);
2548
2549 struct qlcnic_adapter *adapter = sds_ring->adapter;
2550
2551 int tx_complete;
2552 int work_done;
2553
2554 tx_complete = qlcnic_process_cmd_ring(adapter);
2555
2556 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2557
2558 if ((work_done < budget) && tx_complete) {
2559 napi_complete(&sds_ring->napi);
2560 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2561 qlcnic_enable_int(sds_ring);
2562 }
2563
2564 return work_done;
2565}
2566
8f891387 2567static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2568{
2569 struct qlcnic_host_sds_ring *sds_ring =
2570 container_of(napi, struct qlcnic_host_sds_ring, napi);
2571
2572 struct qlcnic_adapter *adapter = sds_ring->adapter;
2573 int work_done;
2574
2575 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2576
2577 if (work_done < budget) {
2578 napi_complete(&sds_ring->napi);
2579 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2580 qlcnic_enable_int(sds_ring);
2581 }
2582
2583 return work_done;
2584}
2585
af19b491
AKS
2586#ifdef CONFIG_NET_POLL_CONTROLLER
2587static void qlcnic_poll_controller(struct net_device *netdev)
2588{
bf82791e
YL
2589 int ring;
2590 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2591 struct qlcnic_adapter *adapter = netdev_priv(netdev);
b1fc6d3c 2592 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
bf82791e 2593
af19b491 2594 disable_irq(adapter->irq);
bf82791e
YL
2595 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2596 sds_ring = &recv_ctx->sds_rings[ring];
2597 qlcnic_intr(adapter->irq, sds_ring);
2598 }
af19b491
AKS
2599 enable_irq(adapter->irq);
2600}
2601#endif
2602
6df900e9
SC
2603static void
2604qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2605{
2606 u32 val;
2607
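/*
 * Scratch register layout, as built below: bits 0-3 carry the port
 * number, bit 7 the encoding flag, and bits 8 and up the jiffies
 * elapsed since the last recorded device reset.
 */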
2608 val = adapter->portnum & 0xf;
2609 val |= encoding << 7;
2610 val |= (jiffies - adapter->dev_rst_time) << 8;
2611
2612 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2613 adapter->dev_rst_time = jiffies;
2614}
2615
ade91f8e
AKS
2616static int
2617qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2618{
2619 u32 val;
2620
2621 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2622 state != QLCNIC_DEV_NEED_QUISCENT);
2623
2624 if (qlcnic_api_lock(adapter))
ade91f8e 2625 return -EIO;
af19b491
AKS
2626
2627 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2628
2629 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2630 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2631 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2632 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2633
2634 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2635
2636 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2637
2638 return 0;
af19b491
AKS
2639}
2640
1b95a839
AKS
2641static int
2642qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2643{
2644 u32 val;
2645
2646 if (qlcnic_api_lock(adapter))
2647 return -EBUSY;
2648
2649 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2650 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2651 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2652
2653 qlcnic_api_unlock(adapter);
2654
2655 return 0;
2656}
2657
af19b491 2658static void
21854f02 2659qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2660{
2661 u32 val;
2662
2663 if (qlcnic_api_lock(adapter))
2664 goto err;
2665
31018e06 2666 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2667 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2668 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2669
21854f02
AKS
2670 if (failed) {
2671 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2672 dev_info(&adapter->pdev->dev,
2673 "Device state set to Failed. Please Reboot\n");
2674 } else if (!(val & 0x11111111))
af19b491
AKS
2675 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2676
2677 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2678 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2679 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2680
2681 qlcnic_api_unlock(adapter);
2682err:
2683 adapter->fw_fail_cnt = 0;
2684 clear_bit(__QLCNIC_START_FW, &adapter->state);
2685 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2686}
2687
f73dfc50 2688/* Grab api lock, before checking state */
af19b491
AKS
2689static int
2690qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2691{
602ca6f0 2692 int act, state, active_mask;
af19b491
AKS
2693
2694 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2695 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491 2696
602ca6f0
SV
2697 if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
2698 active_mask = (~(1 << (adapter->ahw->pci_func * 4)));
2699 act = act & active_mask;
2700 }
2701
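/*
 * Each PCI function owns a nibble in DRV_ACTIVE/DRV_STATE; the
 * 0x11111111 masks compare every function's active bit against its
 * reset-ready bit (or, shifted by one, its quiescent-ready bit), so the
 * check passes only once all active functions have acked.
 */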
af19b491
AKS
2702 if (((state & 0x11111111) == (act & 0x11111111)) ||
2703 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2704 return 0;
2705 else
2706 return 1;
2707}
2708
96f8118c
SC
2709static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2710{
2711 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2712
2713 if (val != QLCNIC_DRV_IDC_VER) {
2714 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2715 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2716 }
2717
2718 return 0;
2719}
2720
af19b491
AKS
2721static int
2722qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2723{
2724 u32 val, prev_state;
aa5e18c0 2725 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2726 u8 portnum = adapter->portnum;
96f8118c 2727 u8 ret;
af19b491 2728
f73dfc50
AKS
2729 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2730 return 1;
2731
af19b491
AKS
2732 if (qlcnic_api_lock(adapter))
2733 return -1;
2734
31018e06 2735 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2736 if (!(val & (1 << (portnum * 4)))) {
2737 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2738 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2739 }
2740
2741 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2742 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2743
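/*
 * COLD: nobody has initialized the device yet, so claim it and load
 * firmware. READY: another function already brought it up.
 * NEED_RESET/NEED_QUISCENT: ack our ready bit and fall through to the
 * wait loop below until the reset owner returns the device to READY.
 */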
2744 switch (prev_state) {
2745 case QLCNIC_DEV_COLD:
bbd8c6a4 2746 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2747 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2748 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2749 qlcnic_api_unlock(adapter);
2750 return 1;
2751
2752 case QLCNIC_DEV_READY:
96f8118c 2753 ret = qlcnic_check_idc_ver(adapter);
af19b491 2754 qlcnic_api_unlock(adapter);
96f8118c 2755 return ret;
af19b491
AKS
2756
2757 case QLCNIC_DEV_NEED_RESET:
2758 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2759 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2760 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2761 break;
2762
2763 case QLCNIC_DEV_NEED_QUISCENT:
2764 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2765 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2766 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2767 break;
2768
2769 case QLCNIC_DEV_FAILED:
a7fc948f 2770 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2771 qlcnic_api_unlock(adapter);
2772 return -1;
bbd8c6a4
AKS
2773
2774 case QLCNIC_DEV_INITIALIZING:
2775 case QLCNIC_DEV_QUISCENT:
2776 break;
af19b491
AKS
2777 }
2778
2779 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2780
2781 do {
af19b491 2782 msleep(1000);
a5e463d0
SC
2783 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2784
2785 if (prev_state == QLCNIC_DEV_QUISCENT)
2786 continue;
2787 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2788
65b5b420
AKS
2789 if (!dev_init_timeo) {
2790 dev_err(&adapter->pdev->dev,
2791 "Waiting for device to initialize timeout\n");
af19b491 2792 return -1;
65b5b420 2793 }
af19b491
AKS
2794
2795 if (qlcnic_api_lock(adapter))
2796 return -1;
2797
2798 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2799 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2800 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2801
96f8118c 2802 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2803 qlcnic_api_unlock(adapter);
2804
96f8118c 2805 return ret;
af19b491
AKS
2806}
2807
2808static void
2809qlcnic_fwinit_work(struct work_struct *work)
2810{
2811 struct qlcnic_adapter *adapter = container_of(work,
2812 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2813 u32 dev_state = 0xf;
7b749ff4 2814 u32 val;
af19b491 2815
f73dfc50
AKS
2816 if (qlcnic_api_lock(adapter))
2817 goto err_ret;
af19b491 2818
a5e463d0 2819 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2820 if (dev_state == QLCNIC_DEV_QUISCENT ||
2821 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2822 qlcnic_api_unlock(adapter);
2823 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2824 FW_POLL_DELAY * 2);
2825 return;
2826 }
2827
9f26f547 2828 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2829 qlcnic_api_unlock(adapter);
2830 goto wait_npar;
9f26f547
AC
2831 }
2832
f73dfc50
AKS
2833 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2834 dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2835 adapter->reset_ack_timeo);
2836 goto skip_ack_check;
2837 }
2838
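/*
 * Once every active function has acked (or the ack timeout expires),
 * the device is moved to INITIALIZING, a firmware dump is taken when
 * enabled and this function owns the reset, and the firmware is
 * restarted; otherwise the function waits below for READY.
 */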
2839 if (!qlcnic_check_drv_state(adapter)) {
2840skip_ack_check:
2841 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2842
f73dfc50
AKS
2843 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2844 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2845 QLCNIC_DEV_INITIALIZING);
2846 set_bit(__QLCNIC_START_FW, &adapter->state);
2847 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2848 qlcnic_idc_debug_info(adapter, 0);
7b749ff4
SV
2849 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2850 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2851 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
af19b491
AKS
2852 }
2853
f73dfc50
AKS
2854 qlcnic_api_unlock(adapter);
2855
287e38aa 2856 rtnl_lock();
7b749ff4
SV
2857 if (adapter->ahw->fw_dump.enable &&
2858 (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
9d6a6440
AC
2859 QLCDB(adapter, DRV, "Take FW dump\n");
2860 qlcnic_dump_fw(adapter);
9d6a6440 2861 }
287e38aa 2862 rtnl_unlock();
7b749ff4
SV
2863
2864 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
9f26f547 2865 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2866 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2867 adapter->fw_wait_cnt = 0;
af19b491
AKS
2868 return;
2869 }
af19b491
AKS
2870 goto err_ret;
2871 }
2872
f73dfc50 2873 qlcnic_api_unlock(adapter);
aa5e18c0 2874
9f26f547 2875wait_npar:
af19b491 2876 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2877 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2878
af19b491 2879 switch (dev_state) {
3c4b23b1 2880 case QLCNIC_DEV_READY:
9f26f547 2881 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2882 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2883 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2884 return;
2885 }
3c4b23b1
AKS
2886 case QLCNIC_DEV_FAILED:
2887 break;
2888 default:
2889 qlcnic_schedule_work(adapter,
2890 qlcnic_fwinit_work, FW_POLL_DELAY);
2891 return;
af19b491
AKS
2892 }
2893
2894err_ret:
f73dfc50
AKS
2895 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2896 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2897 netif_device_attach(adapter->netdev);
21854f02 2898 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2899}
2900
2901static void
2902qlcnic_detach_work(struct work_struct *work)
2903{
2904 struct qlcnic_adapter *adapter = container_of(work,
2905 struct qlcnic_adapter, fw_work.work);
2906 struct net_device *netdev = adapter->netdev;
2907 u32 status;
2908
2909 netif_device_detach(netdev);
2910
b8c17620
AKS
2911 /* Don't grab the rtnl lock during quiescent mode */
2912 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2913 if (netif_running(netdev))
2914 __qlcnic_down(adapter, netdev);
2915 } else
2916 qlcnic_down(adapter, netdev);
af19b491 2917
af19b491
AKS
2918 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2919
2920 if (status & QLCNIC_RCODE_FATAL_ERROR)
2921 goto err_ret;
2922
2923 if (adapter->temp == QLCNIC_TEMP_PANIC)
2924 goto err_ret;
602ca6f0
SV
2925 /* Don't ack if this instance is the reset owner */
2926 if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
2927 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2928 goto err_ret;
2929 }
af19b491
AKS
2930
2931 adapter->fw_wait_cnt = 0;
2932
2933 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2934
2935 return;
2936
2937err_ret:
65b5b420
AKS
2938 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2939 status, adapter->temp);
34ce3626 2940 netif_device_attach(netdev);
21854f02 2941 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2942}
2943
3c4b23b1
AKS
2944 /* Transition NPAR state to NON-operational */
2945static void
2946qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2947{
2948 u32 state;
2949
2950 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2951 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2952 return;
2953
2954 if (qlcnic_api_lock(adapter))
2955 return;
2956 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2957 qlcnic_api_unlock(adapter);
2958}
2959
f73dfc50 2960/*Transit to RESET state from READY state only */
18f2f616 2961void
af19b491
AKS
2962qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2963{
2964 u32 state;
2965
cea8975e 2966 adapter->need_fw_reset = 1;
af19b491
AKS
2967 if (qlcnic_api_lock(adapter))
2968 return;
2969
2970 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2971
f73dfc50 2972 if (state == QLCNIC_DEV_READY) {
af19b491 2973 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
602ca6f0 2974 adapter->flags |= QLCNIC_FW_RESET_OWNER;
65b5b420 2975 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2976 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2977 }
2978
3c4b23b1 2979 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2980 qlcnic_api_unlock(adapter);
2981}
2982
9f26f547
AC
2983 /* Transition to NPAR READY state from NPAR NOT READY state */
2984static void
2985qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2986{
9f26f547
AC
2987 if (qlcnic_api_lock(adapter))
2988 return;
2989
3c4b23b1
AKS
2990 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2991 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2992
2993 qlcnic_api_unlock(adapter);
2994}
2995
af19b491
AKS
2996static void
2997qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2998 work_func_t func, int delay)
2999{
451724c8
SC
3000 if (test_bit(__QLCNIC_AER, &adapter->state))
3001 return;
3002
af19b491 3003 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
3004 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
3005 round_jiffies_relative(delay));
af19b491
AKS
3006}
3007
3008static void
3009qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
3010{
3011 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3012 msleep(10);
3013
3014 cancel_delayed_work_sync(&adapter->fw_work);
3015}
3016
3017static void
3018qlcnic_attach_work(struct work_struct *work)
3019{
3020 struct qlcnic_adapter *adapter = container_of(work,
3021 struct qlcnic_adapter, fw_work.work);
3022 struct net_device *netdev = adapter->netdev;
b18971d1 3023 u32 npar_state;
af19b491 3024
b18971d1
AKS
3025 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
3026 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
3027 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
3028 qlcnic_clr_all_drv_state(adapter, 0);
3029 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
3030 qlcnic_schedule_work(adapter, qlcnic_attach_work,
3031 FW_POLL_DELAY);
3032 else
3033 goto attach;
3034 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
3035 return;
3036 }
3037attach:
af19b491 3038 if (netif_running(netdev)) {
52486a3a 3039 if (qlcnic_up(adapter, netdev))
af19b491 3040 goto done;
af19b491 3041
aec1e845 3042 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
3043 }
3044
af19b491 3045done:
34ce3626 3046 netif_device_attach(netdev);
af19b491
AKS
3047 adapter->fw_fail_cnt = 0;
3048 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
3049
3050 if (!qlcnic_clr_drv_state(adapter))
3051 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3052 FW_POLL_DELAY);
af19b491
AKS
3053}
3054
3055static int
3056qlcnic_check_health(struct qlcnic_adapter *adapter)
3057{
4e70812b 3058 u32 state = 0, heartbeat;
af19b491
AKS
3059 struct net_device *netdev = adapter->netdev;
3060
3061 if (qlcnic_check_temp(adapter))
3062 goto detach;
3063
2372a5f1 3064 if (adapter->need_fw_reset)
af19b491 3065 qlcnic_dev_request_reset(adapter);
af19b491
AKS
3066
3067 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 3068 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 3069 qlcnic_set_npar_non_operational(adapter);
af19b491 3070 adapter->need_fw_reset = 1;
b8c17620
AKS
3071 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3072 goto detach;
af19b491 3073
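/*
 * Firmware increments QLCNIC_PEG_ALIVE_COUNTER while it is healthy; if
 * the counter stops advancing for FW_FAIL_THRESH consecutive polls,
 * treat it as a firmware hang and request a reset.
 */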
4e70812b
SC
3074 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3075 if (heartbeat != adapter->heartbeat) {
3076 adapter->heartbeat = heartbeat;
af19b491
AKS
3077 adapter->fw_fail_cnt = 0;
3078 if (adapter->need_fw_reset)
3079 goto detach;
68bf1c68 3080
9ce13ca8 3081 if (adapter->reset_context && auto_fw_reset) {
68bf1c68
AKS
3082 qlcnic_reset_hw_context(adapter);
3083 adapter->netdev->trans_start = jiffies;
3084 }
3085
af19b491
AKS
3086 return 0;
3087 }
3088
3089 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3090 return 0;
3091
3092 qlcnic_dev_request_reset(adapter);
3093
9ce13ca8 3094 if (auto_fw_reset)
0df170b6 3095 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
3096
3097 dev_info(&netdev->dev, "firmware hang detected\n");
3098
3099detach:
3100 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3101 QLCNIC_DEV_NEED_RESET;
3102
9ce13ca8 3103 if (auto_fw_reset &&
65b5b420
AKS
3104 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3105
af19b491 3106 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3107 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3108 }
af19b491
AKS
3109
3110 return 1;
3111}
3112
3113static void
3114qlcnic_fw_poll_work(struct work_struct *work)
3115{
3116 struct qlcnic_adapter *adapter = container_of(work,
3117 struct qlcnic_adapter, fw_work.work);
3118
3119 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3120 goto reschedule;
3121
3122
3123 if (qlcnic_check_health(adapter))
3124 return;
3125
b5e5492c
AKS
3126 if (adapter->fhash.fnum)
3127 qlcnic_prune_lb_filters(adapter);
3128
af19b491
AKS
3129reschedule:
3130 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3131}
3132
451724c8
SC
3133static int qlcnic_is_first_func(struct pci_dev *pdev)
3134{
3135 struct pci_dev *oth_pdev;
3136 int val = pdev->devfn;
3137
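/*
 * Scan the lower-numbered PCI functions in this slot; if any of them is
 * already awake (not in D3cold), this function is not the first one
 * coming back and must not restart the firmware itself.
 */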
3138 while (val-- > 0) {
3139 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3140 (pdev->bus), pdev->bus->number,
3141 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3142 if (!oth_pdev)
3143 continue;
451724c8 3144
bfc978fa
AKS
3145 if (oth_pdev->current_state != PCI_D3cold) {
3146 pci_dev_put(oth_pdev);
451724c8 3147 return 0;
bfc978fa
AKS
3148 }
3149 pci_dev_put(oth_pdev);
451724c8
SC
3150 }
3151 return 1;
3152}
3153
3154static int qlcnic_attach_func(struct pci_dev *pdev)
3155{
3156 int err, first_func;
3157 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3158 struct net_device *netdev = adapter->netdev;
3159
3160 pdev->error_state = pci_channel_io_normal;
3161
3162 err = pci_enable_device(pdev);
3163 if (err)
3164 return err;
3165
3166 pci_set_power_state(pdev, PCI_D0);
3167 pci_set_master(pdev);
3168 pci_restore_state(pdev);
3169
3170 first_func = qlcnic_is_first_func(pdev);
3171
3172 if (qlcnic_api_lock(adapter))
3173 return -EINVAL;
3174
933fce12 3175 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3176 adapter->need_fw_reset = 1;
3177 set_bit(__QLCNIC_START_FW, &adapter->state);
3178 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3179 QLCDB(adapter, DRV, "Restarting fw\n");
3180 }
3181 qlcnic_api_unlock(adapter);
3182
3183 err = adapter->nic_ops->start_firmware(adapter);
3184 if (err)
3185 return err;
3186
3187 qlcnic_clr_drv_state(adapter);
3188 qlcnic_setup_intr(adapter);
3189
3190 if (netif_running(netdev)) {
3191 err = qlcnic_attach(adapter);
3192 if (err) {
21854f02 3193 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3194 clear_bit(__QLCNIC_AER, &adapter->state);
3195 netif_device_attach(netdev);
3196 return err;
3197 }
3198
3199 err = qlcnic_up(adapter, netdev);
3200 if (err)
3201 goto done;
3202
aec1e845 3203 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3204 }
3205 done:
3206 netif_device_attach(netdev);
3207 return err;
3208}
3209
3210static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3211 pci_channel_state_t state)
3212{
3213 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3214 struct net_device *netdev = adapter->netdev;
3215
3216 if (state == pci_channel_io_perm_failure)
3217 return PCI_ERS_RESULT_DISCONNECT;
3218
3219 if (state == pci_channel_io_normal)
3220 return PCI_ERS_RESULT_RECOVERED;
3221
3222 set_bit(__QLCNIC_AER, &adapter->state);
3223 netif_device_detach(netdev);
3224
3225 cancel_delayed_work_sync(&adapter->fw_work);
3226
3227 if (netif_running(netdev))
3228 qlcnic_down(adapter, netdev);
3229
3230 qlcnic_detach(adapter);
3231 qlcnic_teardown_intr(adapter);
3232
3233 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3234
3235 pci_save_state(pdev);
3236 pci_disable_device(pdev);
3237
3238 return PCI_ERS_RESULT_NEED_RESET;
3239}
3240
3241static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3242{
3243 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3244 PCI_ERS_RESULT_RECOVERED;
3245}
3246
3247static void qlcnic_io_resume(struct pci_dev *pdev)
3248{
3249 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3250
3251 pci_cleanup_aer_uncorrect_error_status(pdev);
3252
3253 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3254 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3255 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3256 FW_POLL_DELAY);
3257}
3258
87eb743b
AC
3259static int
3260qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3261{
3262 int err;
3263
3264 err = qlcnic_can_start_firmware(adapter);
3265 if (err)
3266 return err;
3267
78f84e1a
AKS
3268 err = qlcnic_check_npar_opertional(adapter);
3269 if (err)
3270 return err;
3c4b23b1 3271
174240a8
RB
3272 err = qlcnic_initialize_nic(adapter);
3273 if (err)
3274 return err;
3275
87eb743b
AC
3276 qlcnic_check_options(adapter);
3277
7373373d
RB
3278 err = qlcnic_set_eswitch_port_config(adapter);
3279 if (err)
3280 return err;
3281
87eb743b
AC
3282 adapter->need_fw_reset = 0;
3283
3284 return err;
3285}
3286
3287static int
3288qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3289{
3290 return -EOPNOTSUPP;
3291}
3292
3293static int
3294qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3295{
3296 return -EOPNOTSUPP;
3297}
3298
af19b491
AKS
3299static ssize_t
3300qlcnic_store_bridged_mode(struct device *dev,
3301 struct device_attribute *attr, const char *buf, size_t len)
3302{
3303 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3304 unsigned long new;
3305 int ret = -EINVAL;
3306
3307 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3308 goto err_out;
3309
8a15ad1f 3310 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3311 goto err_out;
3312
3313 if (strict_strtoul(buf, 2, &new))
3314 goto err_out;
3315
2e9d722d 3316 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3317 ret = len;
3318
3319err_out:
3320 return ret;
3321}
3322
3323static ssize_t
3324qlcnic_show_bridged_mode(struct device *dev,
3325 struct device_attribute *attr, char *buf)
3326{
3327 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3328 int bridged_mode = 0;
3329
3330 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3331 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3332
3333 return sprintf(buf, "%d\n", bridged_mode);
3334}
3335
3336static struct device_attribute dev_attr_bridged_mode = {
3337 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3338 .show = qlcnic_show_bridged_mode,
3339 .store = qlcnic_store_bridged_mode,
3340};
3341
3342static ssize_t
3343qlcnic_store_diag_mode(struct device *dev,
3344 struct device_attribute *attr, const char *buf, size_t len)
3345{
3346 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3347 unsigned long new;
3348
3349 if (strict_strtoul(buf, 2, &new))
3350 return -EINVAL;
3351
3352 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3353 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3354
3355 return len;
3356}
3357
3358static ssize_t
3359qlcnic_show_diag_mode(struct device *dev,
3360 struct device_attribute *attr, char *buf)
3361{
3362 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3363
3364 return sprintf(buf, "%d\n",
3365 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3366}
3367
3368static struct device_attribute dev_attr_diag_mode = {
3369 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3370 .show = qlcnic_show_diag_mode,
3371 .store = qlcnic_store_diag_mode,
3372};
3373
f94bc1e7
SC
3374int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
3375{
3376 if (!use_msi_x && !use_msi) {
3377 netdev_info(netdev, "no msix or msi support, hence no rss\n");
3378 return -EINVAL;
3379 }
3380
3381 if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
3382 netdev_info(netdev, "rss_ring valid range [2 - %x] in "
3383 " powers of 2\n", max_hw);
3384 return -EINVAL;
3385 }
3386 return 0;
3387
3388}
3389
3390int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
3391{
3392 struct net_device *netdev = adapter->netdev;
3393 int err = 0;
3394
3395 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3396 return -EBUSY;
3397
3398 netif_device_detach(netdev);
3399 if (netif_running(netdev))
3400 __qlcnic_down(adapter, netdev);
3401 qlcnic_detach(adapter);
3402 qlcnic_teardown_intr(adapter);
3403
3404 if (qlcnic_enable_msix(adapter, data)) {
3405 netdev_info(netdev, "failed setting max_rss; rss disabled\n");
3406 qlcnic_enable_msi_legacy(adapter);
3407 }
3408
3409 if (netif_running(netdev)) {
3410 err = qlcnic_attach(adapter);
3411 if (err)
3412 goto done;
3413 err = __qlcnic_up(adapter, netdev);
3414 if (err)
3415 goto done;
3416 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
3417 }
3418 done:
3419 netif_device_attach(netdev);
3420 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3421 return err;
3422}
3423
af19b491
AKS
3424static int
3425qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3426 loff_t offset, size_t size)
3427{
897e8c7c
DP
3428 size_t crb_size = 4;
3429
af19b491
AKS
3430 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3431 return -EIO;
3432
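/*
 * CRB registers are accessed as 4-byte quantities; only the CAM/QM
 * window below QLCNIC_PCI_CRBSPACE is read/written 8 bytes at a time,
 * so require a matching size and alignment.
 */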
897e8c7c
DP
3433 if (offset < QLCNIC_PCI_CRBSPACE) {
3434 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3435 QLCNIC_PCI_CAMQM_END))
3436 crb_size = 8;
3437 else
3438 return -EINVAL;
3439 }
af19b491 3440
897e8c7c
DP
3441 if ((size != crb_size) || (offset & (crb_size-1)))
3442 return -EINVAL;
af19b491
AKS
3443
3444 return 0;
3445}
3446
3447static ssize_t
2c3c8bea
CW
3448qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3449 struct bin_attribute *attr,
af19b491
AKS
3450 char *buf, loff_t offset, size_t size)
3451{
3452 struct device *dev = container_of(kobj, struct device, kobj);
3453 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3454 u32 data;
897e8c7c 3455 u64 qmdata;
af19b491
AKS
3456 int ret;
3457
3458 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3459 if (ret != 0)
3460 return ret;
3461
897e8c7c
DP
3462 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3463 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3464 memcpy(buf, &qmdata, size);
3465 } else {
3466 data = QLCRD32(adapter, offset);
3467 memcpy(buf, &data, size);
3468 }
af19b491
AKS
3469 return size;
3470}
3471
3472static ssize_t
2c3c8bea
CW
3473qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3474 struct bin_attribute *attr,
af19b491
AKS
3475 char *buf, loff_t offset, size_t size)
3476{
3477 struct device *dev = container_of(kobj, struct device, kobj);
3478 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3479 u32 data;
897e8c7c 3480 u64 qmdata;
af19b491
AKS
3481 int ret;
3482
3483 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3484 if (ret != 0)
3485 return ret;
3486
897e8c7c
DP
3487 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3488 memcpy(&qmdata, buf, size);
3489 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3490 } else {
3491 memcpy(&data, buf, size);
3492 QLCWR32(adapter, offset, data);
3493 }
af19b491
AKS
3494 return size;
3495}
3496
3497static int
3498qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3499 loff_t offset, size_t size)
3500{
3501 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3502 return -EIO;
3503
3504 if ((size != 8) || (offset & 0x7))
3505 return -EIO;
3506
3507 return 0;
3508}
3509
3510static ssize_t
2c3c8bea
CW
3511qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3512 struct bin_attribute *attr,
af19b491
AKS
3513 char *buf, loff_t offset, size_t size)
3514{
3515 struct device *dev = container_of(kobj, struct device, kobj);
3516 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3517 u64 data;
3518 int ret;
3519
3520 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3521 if (ret != 0)
3522 return ret;
3523
3524 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3525 return -EIO;
3526
3527 memcpy(buf, &data, size);
3528
3529 return size;
3530}
3531
3532static ssize_t
2c3c8bea
CW
3533qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3534 struct bin_attribute *attr,
af19b491
AKS
3535 char *buf, loff_t offset, size_t size)
3536{
3537 struct device *dev = container_of(kobj, struct device, kobj);
3538 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3539 u64 data;
3540 int ret;
3541
3542 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3543 if (ret != 0)
3544 return ret;
3545
3546 memcpy(&data, buf, size);
3547
3548 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3549 return -EIO;
3550
3551 return size;
3552}
3553
af19b491
AKS
3554static struct bin_attribute bin_attr_crb = {
3555 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3556 .size = 0,
3557 .read = qlcnic_sysfs_read_crb,
3558 .write = qlcnic_sysfs_write_crb,
3559};
3560
3561static struct bin_attribute bin_attr_mem = {
3562 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3563 .size = 0,
3564 .read = qlcnic_sysfs_read_mem,
3565 .write = qlcnic_sysfs_write_mem,
3566};
3567
cea8975e 3568static int
346fe763
RB
3569validate_pm_config(struct qlcnic_adapter *adapter,
3570 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3571{
3572
3573 u8 src_pci_func, s_esw_id, d_esw_id;
3574 u8 dest_pci_func;
3575 int i;
3576
3577 for (i = 0; i < count; i++) {
3578 src_pci_func = pm_cfg[i].pci_func;
3579 dest_pci_func = pm_cfg[i].dest_npar;
3580 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3581 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3582 return QL_STATUS_INVALID_PARAM;
3583
3584 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3585 return QL_STATUS_INVALID_PARAM;
3586
3587 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3588 return QL_STATUS_INVALID_PARAM;
3589
346fe763
RB
3590 s_esw_id = adapter->npars[src_pci_func].phy_port;
3591 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3592
3593 if (s_esw_id != d_esw_id)
3594 return QL_STATUS_INVALID_PARAM;
3595
3596 }
3597 return 0;
3598
3599}
3600
3601static ssize_t
3602qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3603 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3604{
3605 struct device *dev = container_of(kobj, struct device, kobj);
3606 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3607 struct qlcnic_pm_func_cfg *pm_cfg;
3608 u32 id, action, pci_func;
3609 int count, rem, i, ret;
3610
3611 count = size / sizeof(struct qlcnic_pm_func_cfg);
3612 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3613 if (rem)
3614 return QL_STATUS_INVALID_PARAM;
3615
3616 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3617
3618 ret = validate_pm_config(adapter, pm_cfg, count);
3619 if (ret)
3620 return ret;
3621 for (i = 0; i < count; i++) {
3622 pci_func = pm_cfg[i].pci_func;
4e8acb01 3623 action = !!pm_cfg[i].action;
346fe763
RB
3624 id = adapter->npars[pci_func].phy_port;
3625 ret = qlcnic_config_port_mirroring(adapter, id,
3626 action, pci_func);
3627 if (ret)
3628 return ret;
3629 }
3630
3631 for (i = 0; i < count; i++) {
3632 pci_func = pm_cfg[i].pci_func;
3633 id = adapter->npars[pci_func].phy_port;
4e8acb01 3634 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3635 adapter->npars[pci_func].dest_npar = id;
3636 }
3637 return size;
3638}
3639
3640static ssize_t
3641qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3642 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3643{
3644 struct device *dev = container_of(kobj, struct device, kobj);
3645 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3646 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3647 int i;
3648
3649 if (size != sizeof(pm_cfg))
3650 return QL_STATUS_INVALID_PARAM;
3651
3652 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3653 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3654 continue;
3655 pm_cfg[i].action = adapter->npars[i].enable_pm;
3656 pm_cfg[i].dest_npar = 0;
3657 pm_cfg[i].pci_func = i;
3658 }
3659 memcpy(buf, &pm_cfg, size);
3660
3661 return size;
3662}
3663
cea8975e 3664static int
346fe763 3665validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3666 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3667{
7613c87b 3668 u32 op_mode;
346fe763
RB
3669 u8 pci_func;
3670 int i;
7613c87b 3671
b1fc6d3c 3672 op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
7613c87b 3673
346fe763
RB
3674 for (i = 0; i < count; i++) {
3675 pci_func = esw_cfg[i].pci_func;
3676 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3677 return QL_STATUS_INVALID_PARAM;
3678
4e8acb01
RB
3679 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3680 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3681 return QL_STATUS_INVALID_PARAM;
346fe763 3682
4e8acb01
RB
3683 switch (esw_cfg[i].op_mode) {
3684 case QLCNIC_PORT_DEFAULTS:
7613c87b 3685 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3686 QLCNIC_NON_PRIV_FUNC) {
091056b2
AKS
3687 if (esw_cfg[i].mac_anti_spoof != 0)
3688 return QL_STATUS_INVALID_PARAM;
3689 if (esw_cfg[i].mac_override != 1)
3690 return QL_STATUS_INVALID_PARAM;
3691 if (esw_cfg[i].promisc_mode != 1)
3692 return QL_STATUS_INVALID_PARAM;
7373373d 3693 }
4e8acb01
RB
3694 break;
3695 case QLCNIC_ADD_VLAN:
346fe763
RB
3696 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3697 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3698 if (!esw_cfg[i].op_type)
3699 return QL_STATUS_INVALID_PARAM;
3700 break;
3701 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3702 if (!esw_cfg[i].op_type)
3703 return QL_STATUS_INVALID_PARAM;
3704 break;
3705 default:
346fe763 3706 return QL_STATUS_INVALID_PARAM;
4e8acb01 3707 }
346fe763 3708 }
346fe763
RB
3709 return 0;
3710}
3711
3712static ssize_t
3713qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3714 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3715{
3716 struct device *dev = container_of(kobj, struct device, kobj);
3717 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3718 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3719 struct qlcnic_npar_info *npar;
346fe763 3720 int count, rem, i, ret;
0325d69b 3721 u8 pci_func, op_mode = 0;
346fe763
RB
3722
3723 count = size / sizeof(struct qlcnic_esw_func_cfg);
3724 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3725 if (rem)
3726 return QL_STATUS_INVALID_PARAM;
3727
3728 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3729 ret = validate_esw_config(adapter, esw_cfg, count);
3730 if (ret)
3731 return ret;
3732
3733 for (i = 0; i < count; i++) {
0325d69b
RB
3734 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3735 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3736 return QL_STATUS_INVALID_PARAM;
e9a47700 3737
b1fc6d3c 3738 if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
e9a47700
RB
3739 continue;
3740
3741 op_mode = esw_cfg[i].op_mode;
3742 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3743 esw_cfg[i].op_mode = op_mode;
b1fc6d3c 3744 esw_cfg[i].pci_func = adapter->ahw->pci_func;
e9a47700
RB
3745
3746 switch (esw_cfg[i].op_mode) {
3747 case QLCNIC_PORT_DEFAULTS:
3748 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3749 break;
8cf61f89
AKS
3750 case QLCNIC_ADD_VLAN:
3751 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3752 break;
3753 case QLCNIC_DEL_VLAN:
3754 esw_cfg[i].vlan_id = 0;
3755 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3756 break;
0325d69b 3757 }
346fe763
RB
3758 }
3759
0325d69b
RB
3760 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3761 goto out;
e9a47700 3762
346fe763
RB
3763 for (i = 0; i < count; i++) {
3764 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3765 npar = &adapter->npars[pci_func];
3766 switch (esw_cfg[i].op_mode) {
3767 case QLCNIC_PORT_DEFAULTS:
3768 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3769 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3770 npar->offload_flags = esw_cfg[i].offload_flags;
3771 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3772 npar->discard_tagged = esw_cfg[i].discard_tagged;
3773 break;
3774 case QLCNIC_ADD_VLAN:
3775 npar->pvid = esw_cfg[i].vlan_id;
3776 break;
3777 case QLCNIC_DEL_VLAN:
3778 npar->pvid = 0;
3779 break;
3780 }
346fe763 3781 }
0325d69b 3782out:
346fe763
RB
3783 return size;
3784}
3785
3786static ssize_t
3787qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3788 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3789{
3790 struct device *dev = container_of(kobj, struct device, kobj);
3791 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3792 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3793 u8 i;
346fe763
RB
3794
3795 if (size != sizeof(esw_cfg))
3796 return QL_STATUS_INVALID_PARAM;
3797	memset(&esw_cfg, 0, sizeof(esw_cfg));
3798 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3799 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3800 continue;
4e8acb01
RB
3801 esw_cfg[i].pci_func = i;
3802 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3803 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3804 }
3805 memcpy(buf, &esw_cfg, size);
3806
3807 return size;
3808}
3809
cea8975e 3810static int
346fe763
RB
3811validate_npar_config(struct qlcnic_adapter *adapter,
3812 struct qlcnic_npar_func_cfg *np_cfg, int count)
3813{
3814 u8 pci_func, i;
3815
3816 for (i = 0; i < count; i++) {
3817 pci_func = np_cfg[i].pci_func;
3818 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3819 return QL_STATUS_INVALID_PARAM;
3820
3821 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3822 return QL_STATUS_INVALID_PARAM;
3823
d12b0d9a
RB
3824 if (!IS_VALID_BW(np_cfg[i].min_bw) ||
3825 !IS_VALID_BW(np_cfg[i].max_bw))
346fe763
RB
3826 return QL_STATUS_INVALID_PARAM;
3827 }
3828 return 0;
3829}
3830
3831static ssize_t
3832qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3833 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3834{
3835 struct device *dev = container_of(kobj, struct device, kobj);
3836 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3837 struct qlcnic_info nic_info;
3838 struct qlcnic_npar_func_cfg *np_cfg;
3839 int i, count, rem, ret;
3840 u8 pci_func;
3841
3842 count = size / sizeof(struct qlcnic_npar_func_cfg);
3843 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3844 if (rem)
3845 return QL_STATUS_INVALID_PARAM;
3846
3847 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3848 ret = validate_npar_config(adapter, np_cfg, count);
3849 if (ret)
3850 return ret;
3851
3852	for (i = 0; i < count; i++) {
3853 pci_func = np_cfg[i].pci_func;
3854 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3855 if (ret)
3856 return ret;
3857 nic_info.pci_func = pci_func;
3858 nic_info.min_tx_bw = np_cfg[i].min_bw;
3859 nic_info.max_tx_bw = np_cfg[i].max_bw;
3860 ret = qlcnic_set_nic_info(adapter, &nic_info);
3861 if (ret)
3862 return ret;
cea8975e
AC
3863		adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
3864		adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
346fe763
RB
3865 }
3866
3867 return size;
3868}
3869
3870static ssize_t
3871qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3872 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3873{
3874 struct device *dev = container_of(kobj, struct device, kobj);
3875 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3876 struct qlcnic_info nic_info;
3877 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3878 int i, ret;
3879
3880 if (size != sizeof(np_cfg))
3881 return QL_STATUS_INVALID_PARAM;
3882	memset(&np_cfg, 0, sizeof(np_cfg));
3883	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3884 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3885 continue;
3886 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3887 if (ret)
3888 return ret;
3889
3890 np_cfg[i].pci_func = i;
a1c0c459 3891 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3892 np_cfg[i].port_num = nic_info.phys_port;
3893 np_cfg[i].fw_capab = nic_info.capabilities;
3894		np_cfg[i].min_bw = nic_info.min_tx_bw;
3895 np_cfg[i].max_bw = nic_info.max_tx_bw;
3896 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3897 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3898 }
3899 memcpy(buf, &np_cfg, size);
3900 return size;
3901}
3902
b6021212
AKS
3903static ssize_t
3904qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3905 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3906{
3907 struct device *dev = container_of(kobj, struct device, kobj);
3908 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3909 struct qlcnic_esw_statistics port_stats;
3910 int ret;
3911
3912 if (size != sizeof(struct qlcnic_esw_statistics))
3913 return QL_STATUS_INVALID_PARAM;
3914
3915 if (offset >= QLCNIC_MAX_PCI_FUNC)
3916 return QL_STATUS_INVALID_PARAM;
3917
3918 memset(&port_stats, 0, size);
3919 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3920 &port_stats.rx);
3921 if (ret)
3922 return ret;
3923
3924 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3925 &port_stats.tx);
3926 if (ret)
3927 return ret;
3928
3929 memcpy(buf, &port_stats, size);
3930 return size;
3931}
3932
3933static ssize_t
3934qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3935 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3936{
3937 struct device *dev = container_of(kobj, struct device, kobj);
3938 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3939 struct qlcnic_esw_statistics esw_stats;
3940 int ret;
3941
3942 if (size != sizeof(struct qlcnic_esw_statistics))
3943 return QL_STATUS_INVALID_PARAM;
3944
3945 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3946 return QL_STATUS_INVALID_PARAM;
3947
3948 memset(&esw_stats, 0, size);
3949 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3950 &esw_stats.rx);
3951 if (ret)
3952 return ret;
3953
3954 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3955 &esw_stats.tx);
3956 if (ret)
3957 return ret;
3958
3959 memcpy(buf, &esw_stats, size);
3960 return size;
3961}
3962
3963static ssize_t
3964qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3965 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3966{
3967 struct device *dev = container_of(kobj, struct device, kobj);
3968 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3969 int ret;
3970
3971 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3972 return QL_STATUS_INVALID_PARAM;
3973
3974 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3975 QLCNIC_QUERY_RX_COUNTER);
3976 if (ret)
3977 return ret;
3978
3979 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3980 QLCNIC_QUERY_TX_COUNTER);
3981 if (ret)
3982 return ret;
3983
3984 return size;
3985}
3986
3987static ssize_t
3988qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3989 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3990{
3991
3992 struct device *dev = container_of(kobj, struct device, kobj);
3993 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3994 int ret;
3995
3996 if (offset >= QLCNIC_MAX_PCI_FUNC)
3997 return QL_STATUS_INVALID_PARAM;
3998
3999 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4000 QLCNIC_QUERY_RX_COUNTER);
4001 if (ret)
4002 return ret;
4003
4004 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
4005 QLCNIC_QUERY_TX_COUNTER);
4006 if (ret)
4007 return ret;
4008
4009 return size;
4010}
4011
346fe763
RB
4012static ssize_t
4013qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
4014 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
4015{
4016 struct device *dev = container_of(kobj, struct device, kobj);
4017 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
4018 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 4019 struct qlcnic_pci_info *pci_info;
346fe763
RB
4020 int i, ret;
4021
4022 if (size != sizeof(pci_cfg))
4023 return QL_STATUS_INVALID_PARAM;
4024
e88db3bd
DC
4025 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
4026 if (!pci_info)
4027 return -ENOMEM;
4028
346fe763 4029 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
4030 if (ret) {
4031 kfree(pci_info);
346fe763 4032 return ret;
e88db3bd 4033 }
346fe763
RB
4034
4035	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
4036 pci_cfg[i].pci_func = pci_info[i].id;
4037 pci_cfg[i].func_type = pci_info[i].type;
4038 pci_cfg[i].port_num = pci_info[i].default_port;
4039 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
4040 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
4041 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
4042 }
4043 memcpy(buf, &pci_cfg, size);
e88db3bd 4044 kfree(pci_info);
346fe763 4045 return size;
346fe763
RB
4046}
4047static struct bin_attribute bin_attr_npar_config = {
4048 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
4049 .size = 0,
4050 .read = qlcnic_sysfs_read_npar_config,
4051 .write = qlcnic_sysfs_write_npar_config,
4052};
4053
4054static struct bin_attribute bin_attr_pci_config = {
4055 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
4056 .size = 0,
4057 .read = qlcnic_sysfs_read_pci_config,
4058 .write = NULL,
4059};
4060
b6021212
AKS
4061static struct bin_attribute bin_attr_port_stats = {
4062 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
4063 .size = 0,
4064 .read = qlcnic_sysfs_get_port_stats,
4065 .write = qlcnic_sysfs_clear_port_stats,
4066};
4067
4068static struct bin_attribute bin_attr_esw_stats = {
4069 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
4070 .size = 0,
4071 .read = qlcnic_sysfs_get_esw_stats,
4072 .write = qlcnic_sysfs_clear_esw_stats,
4073};
4074
346fe763
RB
4075static struct bin_attribute bin_attr_esw_config = {
4076 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
4077 .size = 0,
4078 .read = qlcnic_sysfs_read_esw_config,
4079 .write = qlcnic_sysfs_write_esw_config,
4080};
4081
4082static struct bin_attribute bin_attr_pm_config = {
4083 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
4084 .size = 0,
4085 .read = qlcnic_sysfs_read_pm_config,
4086 .write = qlcnic_sysfs_write_pm_config,
4087};
4088
af19b491
AKS
4089static void
4090qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
4091{
4092 struct device *dev = &adapter->pdev->dev;
4093
4094 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4095 if (device_create_file(dev, &dev_attr_bridged_mode))
4096 dev_warn(dev,
4097 "failed to create bridged_mode sysfs entry\n");
4098}
4099
4100static void
4101qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4102{
4103 struct device *dev = &adapter->pdev->dev;
4104
4105 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4106 device_remove_file(dev, &dev_attr_bridged_mode);
4107}
4108
4109static void
4110qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4111{
4112 struct device *dev = &adapter->pdev->dev;
4113
b6021212
AKS
4114 if (device_create_bin_file(dev, &bin_attr_port_stats))
4115 dev_info(dev, "failed to create port stats sysfs entry");
4116
132ff00a
AC
4117 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4118 return;
af19b491
AKS
4119 if (device_create_file(dev, &dev_attr_diag_mode))
4120 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4121 if (device_create_bin_file(dev, &bin_attr_crb))
4122 dev_info(dev, "failed to create crb sysfs entry\n");
4123 if (device_create_bin_file(dev, &bin_attr_mem))
4124 dev_info(dev, "failed to create mem sysfs entry\n");
53478fef
SC
4125 if (device_create_bin_file(dev, &bin_attr_pci_config))
4126 dev_info(dev, "failed to create pci config sysfs entry");
4e8acb01
RB
4127 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4128 return;
4129 if (device_create_bin_file(dev, &bin_attr_esw_config))
4130 dev_info(dev, "failed to create esw config sysfs entry");
4131 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4132 return;
346fe763
RB
4133 if (device_create_bin_file(dev, &bin_attr_npar_config))
4134 dev_info(dev, "failed to create npar config sysfs entry");
346fe763
RB
4135 if (device_create_bin_file(dev, &bin_attr_pm_config))
4136 dev_info(dev, "failed to create pm config sysfs entry");
b6021212
AKS
4137 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4138 dev_info(dev, "failed to create eswitch stats sysfs entry");
af19b491
AKS
4139}
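qlcnic_create_diag_entries() registers all of these attributes against &adapter->pdev->dev, so they appear in the PCI device's sysfs directory rather than under the net device, and which files exist depends on the function's op_mode and on QLCNIC_ESWITCH_ENABLED. A small probe of that directory might look like the sketch below; the attribute names come from the bin_attribute definitions above, while the PCI address is a placeholder.

/* Hypothetical sketch: report which qlcnic diag/config attributes were
 * created for a given adapter. PCI address is a placeholder. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	static const char *names[] = {
		"port_stats", "diag_mode", "crb", "mem", "pci_config",
		"esw_config", "npar_config", "pm_config", "esw_stats",
	};
	char path[128];
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/bus/pci/devices/0000:03:00.0/%s", names[i]);
		printf("%-12s %s\n", names[i],
		       access(path, F_OK) == 0 ? "present" : "absent");
	}
	return 0;
}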
4140
af19b491
AKS
4141static void
4142qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4143{
4144 struct device *dev = &adapter->pdev->dev;
4145
b6021212
AKS
4146 device_remove_bin_file(dev, &bin_attr_port_stats);
4147
132ff00a
AC
4148 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4149 return;
af19b491
AKS
4150 device_remove_file(dev, &dev_attr_diag_mode);
4151 device_remove_bin_file(dev, &bin_attr_crb);
4152 device_remove_bin_file(dev, &bin_attr_mem);
53478fef 4153 device_remove_bin_file(dev, &bin_attr_pci_config);
4e8acb01
RB
4154 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4155 return;
4156 device_remove_bin_file(dev, &bin_attr_esw_config);
4157 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
346fe763 4158 return;
346fe763 4159 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4160 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4161 device_remove_bin_file(dev, &bin_attr_esw_stats);
af19b491
AKS
4162}
4163
4164#ifdef CONFIG_INET
4165
4166#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4167
af19b491 4168static void
aec1e845
AKS
4169qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4170 struct net_device *dev, unsigned long event)
af19b491
AKS
4171{
4172 struct in_device *indev;
af19b491 4173
af19b491
AKS
4174 indev = in_dev_get(dev);
4175 if (!indev)
4176 return;
4177
4178 for_ifa(indev) {
4179 switch (event) {
4180 case NETDEV_UP:
4181 qlcnic_config_ipaddr(adapter,
4182 ifa->ifa_address, QLCNIC_IP_UP);
4183 break;
4184 case NETDEV_DOWN:
4185 qlcnic_config_ipaddr(adapter,
4186 ifa->ifa_address, QLCNIC_IP_DOWN);
4187 break;
4188 default:
4189 break;
4190 }
4191 } endfor_ifa(indev);
4192
4193 in_dev_put(indev);
af19b491
AKS
4194}
4195
aec1e845
AKS
4196static void
4197qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4198{
4199 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4200 struct net_device *dev;
4201 u16 vid;
4202
4203 qlcnic_config_indev_addr(adapter, netdev, event);
4204
b9796a14
AC
4205 for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
4206 dev = vlan_find_dev(netdev, vid);
aec1e845
AKS
4207 if (!dev)
4208 continue;
aec1e845
AKS
4209 qlcnic_config_indev_addr(adapter, dev, event);
4210 }
4211}
4212
af19b491
AKS
4213static int qlcnic_netdev_event(struct notifier_block *this,
4214 unsigned long event, void *ptr)
4215{
4216 struct qlcnic_adapter *adapter;
4217 struct net_device *dev = (struct net_device *)ptr;
4218
4219recheck:
4220 if (dev == NULL)
4221 goto done;
4222
4223 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4224 dev = vlan_dev_real_dev(dev);
4225 goto recheck;
4226 }
4227
4228 if (!is_qlcnic_netdev(dev))
4229 goto done;
4230
4231 adapter = netdev_priv(dev);
4232
4233 if (!adapter)
4234 goto done;
4235
8a15ad1f 4236 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4237 goto done;
4238
aec1e845 4239 qlcnic_config_indev_addr(adapter, dev, event);
af19b491
AKS
4240done:
4241 return NOTIFY_DONE;
4242}
4243
4244static int
4245qlcnic_inetaddr_event(struct notifier_block *this,
4246 unsigned long event, void *ptr)
4247{
4248 struct qlcnic_adapter *adapter;
4249 struct net_device *dev;
4250
4251 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4252
4253 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4254
4255recheck:
aec1e845 4256 if (dev == NULL)
af19b491
AKS
4257 goto done;
4258
4259 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4260 dev = vlan_dev_real_dev(dev);
4261 goto recheck;
4262 }
4263
4264 if (!is_qlcnic_netdev(dev))
4265 goto done;
4266
4267 adapter = netdev_priv(dev);
4268
251a84c9 4269 if (!adapter)
af19b491
AKS
4270 goto done;
4271
8a15ad1f 4272 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
4273 goto done;
4274
4275 switch (event) {
4276 case NETDEV_UP:
4277 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4278 break;
4279 case NETDEV_DOWN:
4280 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4281 break;
4282 default:
4283 break;
4284 }
4285
4286done:
4287 return NOTIFY_DONE;
4288}
4289
4290static struct notifier_block qlcnic_netdev_cb = {
4291 .notifier_call = qlcnic_netdev_event,
4292};
4293
4294static struct notifier_block qlcnic_inetaddr_cb = {
4295 .notifier_call = qlcnic_inetaddr_event,
4296};
4297#else
4298static void
aec1e845 4299qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
af19b491
AKS
4300{ }
4301#endif
451724c8
SC
4302static struct pci_error_handlers qlcnic_err_handler = {
4303 .error_detected = qlcnic_io_error_detected,
4304 .slot_reset = qlcnic_io_slot_reset,
4305 .resume = qlcnic_io_resume,
4306};
af19b491
AKS
4307
4308static struct pci_driver qlcnic_driver = {
4309 .name = qlcnic_driver_name,
4310 .id_table = qlcnic_pci_tbl,
4311 .probe = qlcnic_probe,
4312 .remove = __devexit_p(qlcnic_remove),
4313#ifdef CONFIG_PM
4314 .suspend = qlcnic_suspend,
4315 .resume = qlcnic_resume,
4316#endif
451724c8
SC
4317 .shutdown = qlcnic_shutdown,
4318 .err_handler = &qlcnic_err_handler
4319
af19b491
AKS
4320};
4321
4322static int __init qlcnic_init_module(void)
4323{
0cf3a14c 4324 int ret;
af19b491
AKS
4325
4326 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4327
f7ec804a
AKS
4328 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4329 if (qlcnic_wq == NULL) {
4330 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4331 return -ENOMEM;
4332 }
4333
af19b491
AKS
4334#ifdef CONFIG_INET
4335 register_netdevice_notifier(&qlcnic_netdev_cb);
4336 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4337#endif
4338
0cf3a14c
AKS
4339 ret = pci_register_driver(&qlcnic_driver);
4340 if (ret) {
4341#ifdef CONFIG_INET
4342 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4343 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4344#endif
f7ec804a 4345 destroy_workqueue(qlcnic_wq);
0cf3a14c 4346 }
af19b491 4347
0cf3a14c 4348 return ret;
af19b491
AKS
4349}
4350
4351module_init(qlcnic_init_module);
4352
4353static void __exit qlcnic_exit_module(void)
4354{
4355
4356 pci_unregister_driver(&qlcnic_driver);
4357
4358#ifdef CONFIG_INET
4359 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4360 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4361#endif
f7ec804a 4362 destroy_workqueue(qlcnic_wq);
af19b491
AKS
4363}
4364
4365module_exit(qlcnic_exit_module);