/**********************************************************************
* Contact: support@cavium.com
*          Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT.  See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"

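/* Soft reset sanity check used below: a known value (0x1234) is parked in
 * SLI_SCRATCH1 before the reset is triggered; if the scratch register still
 * holds that value afterwards, the chip never went through reset and the
 * routine reports failure.
 */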
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
        octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

        dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

        lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
        octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

        lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
        lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

        /* make sure that the reset is written before starting timer */
        mmiowb();

        /* Wait for 10ms as Octeon resets. */
        mdelay(100);

        if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
                dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
                return 1;
        }

        dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
        octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

        return 0;
}

void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
        u32 val;

        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
        if (val & 0x000f0000) {
                dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
                        val & 0x000f0000);
        }

        val |= 0xf; /* Enable Link error reporting */
        dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
        pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}

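/* PCIe Device Control register layout used below (per the PCIe spec):
 * bits 7:5 hold the Max Payload Size encoding and bits 14:12 hold the
 * Max Read Request Size encoding, which is why the two helpers below
 * mask with (0x7 << 5) and (0x7 << 12) respectively.
 */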
void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
                               enum octeon_pcie_mps mps)
{
        u32 val;
        u64 r64;

        /* Read config register for MPS */
        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

        if (mps == PCIE_MPS_DEFAULT) {
                mps = ((val & (0x7 << 5)) >> 5);
        } else {
                val &= ~(0x7 << 5); /* Turn off any MPS bits */
                val |= (mps << 5);  /* Set MPS */
                pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
        }

        /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
        r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
        r64 |= (mps << 4);
        lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
                                enum octeon_pcie_mrrs mrrs)
{
        u32 val;
        u64 r64;

        /* Read config register for MRRS */
        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

        if (mrrs == PCIE_MRRS_DEFAULT) {
                mrrs = ((val & (0x7 << 12)) >> 12);
        } else {
                val &= ~(0x7 << 12); /* Turn off any MRRS bits */
                val |= (mrrs << 12); /* Set MRRS */
                pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
        }

        /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
        r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
        r64 |= (mrrs << 4);
        octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

        /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
        r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
        r64 |= (mrrs << 16);
        lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}

u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
        /* Bits 29:24 of MIO_RST_BOOT holds the ref. clock multiplier
         * for the SLI block.
         */
        return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}

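/* Example (illustrative numbers): a reference-clock multiplier of 12 makes
 * lio_cn6xxx_coprocessor_clock() return 12 * 50 = 600, i.e. a 600 MHz SLI
 * clock, or 600 clock cycles per microsecond.
 * lio_cn6xxx_get_oq_ticks() below converts a time threshold given in
 * microseconds into output-queue timer ticks of 1024 clock cycles each:
 * 600 * 1000 / 1024 = ~585 ticks per millisecond, so a 100 us threshold
 * becomes 585 * 100 / 1000 = ~58 ticks.
 */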
u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
                            u32 time_intr_in_us)
{
        /* This gives the SLI clock per microsec */
        u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

        /* core clock per us / oq ticks will be fractional. To avoid that
         * we use the method below.
         */

        /* This gives the clock cycles per millisecond */
        oqticks_per_us *= 1000;

        /* This gives the oq ticks (1024 core clock cycles) per millisecond */
        oqticks_per_us /= 1024;

        /* time_intr is in microseconds. The next 2 steps give the oq ticks
         * corresponding to time_intr.
         */
        oqticks_per_us *= time_intr_in_us;
        oqticks_per_us /= 1000;

        return oqticks_per_us;
}

void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
        /* Select Round-Robin Arb, ES, RO, NS for Input Queues */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
                         CN6XXX_INPUT_CTL_MASK);

        /* Instruction Read Size - Max 4 instructions per PCIE Read */
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
                           0xFFFFFFFFFFFFFFFFULL);

        /* Select PCIE Port for all Input rings. */
        octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
                           (oct->pcie_port * 0x5555555555555555ULL));
}

static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
        u64 pktctl;

        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

        if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
                /* Disable RING_EN if only up to 4 rings are used. */
                pktctl &= ~(1 << 4);
        else
                pktctl |= (1 << 4);

        if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
                pktctl |= 0xF;
        else
                /* Disable per-port backpressure. */
                pktctl &= ~0xF;
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
        u32 time_threshold;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        /* Select PCI-E Port for all Output queues */
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
                           (oct->pcie_port * 0x5555555555555555ULL));

        if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
                octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
        } else {
                /* Set Output queue watermark to 0 to disable backpressure */
                octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
        }

        /* Select Info Ptr for length & data */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);

        /* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

        /* Select ES, RO, NS setting from register for Output Queue Packet
         * Address
         */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

        /* No Relaxed Ordering, No Snoop, 64-bit swap for Output
         * Queue ScatterList
         */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

        /* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
                           0x5555555555555555ULL);
#else
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

        /* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
                           0x5555555555555555ULL);

        /* Set up interrupt packet and time threshold */
        octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
                         (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
        time_threshold =
                lio_cn6xxx_get_oq_ticks(oct, (u32)
                                        CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

        octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}

static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
        lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
        lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
        lio_cn6xxx_enable_error_reporting(oct);

        lio_cn6xxx_setup_global_input_regs(oct);
        lio_cn66xx_setup_pkt_ctl_regs(oct);
        lio_cn6xxx_setup_global_output_regs(oct);

        /* Default error timeout value should be 0x200000 to avoid a host
         * hang when the host reads an invalid register.
         */
        octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

        return 0;
}

void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        /* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
        octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

        /* Write the start of the input queue's ring and its size */
        octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
                           iq->base_addr_dma);
        octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

        /* Remember the doorbell & instruction count register addr for this
         * queue
         */
        iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
        iq->inst_cnt_reg = oct->mmio[0].hw_addr
                           + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
        dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
                iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

        /* Store the current instruction counter
         * (used in flush_iq calculation)
         */
        iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}

static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
        lio_cn6xxx_setup_iq_regs(oct, iq_no);

        /* Backpressure for this queue - WMARK set to all F's. This effectively
         * disables the backpressure mechanism.
         */
        octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
                           (0xFFFFFFFFULL << 32));
}

void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
        u32 intr;
        struct octeon_droq *droq = oct->droq[oq_no];

        octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
                           droq->desc_ring_dma);
        octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

        octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
                         (droq->buffer_size | (OCT_RH_SIZE << 16)));

        /* Get the mapped address of the pkt_sent and pkts_credit regs */
        droq->pkts_sent_reg =
                oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
        droq->pkts_credit_reg =
                oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

        /* Enable this output queue to generate Packet Timer Interrupt */
        intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
        intr |= (1 << oq_no);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

        /* Enable this output queue to generate Packet Count Interrupt */
        intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
        intr |= (1 << oq_no);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}

void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
        u32 mask;

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
        mask |= oct->io_qmask.iq64B;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
        mask |= oct->io_qmask.iq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
        mask |= oct->io_qmask.oq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
}

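/* Note on the disable path below: the enable bits are cleared by XOR'ing
 * the current register value with the driver's queue mask (assuming the
 * masked queues are currently enabled), after which the driver polls the
 * SLI_PORT*_IN_RST registers for up to roughly one second (HZ iterations
 * with 1-jiffy sleeps) before clearing the per-queue doorbell, credit and
 * pending-interrupt state.
 */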
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
        u32 mask, i, loop = HZ;
        u32 d32;

        /* Reset the Enable bits for Input Queues. */
        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
        mask ^= oct->io_qmask.iq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

        /* Wait until hardware indicates that the queues are out of reset. */
        mask = oct->io_qmask.iq;
        d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
        while (((d32 & mask) != mask) && loop--) {
                d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
                schedule_timeout_uninterruptible(1);
        }

        /* Reset the doorbell register for each Input queue. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
                if (!(oct->io_qmask.iq & (1UL << i)))
                        continue;
                octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
                d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
        }

        /* Reset the Enable bits for Output Queues. */
        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
        mask ^= oct->io_qmask.oq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

        /* Wait until hardware indicates that the queues are out of reset. */
        loop = HZ;
        mask = oct->io_qmask.oq;
        d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
        while (((d32 & mask) != mask) && loop--) {
                d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
                schedule_timeout_uninterruptible(1);
        }

        /* Reset the doorbell register for each Output queue. */
        /* for (i = 0; i < oct->num_oqs; i++) { */
        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
                if (!(oct->io_qmask.oq & (1UL << i)))
                        continue;
                octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
                d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

                d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
                octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
        }

        d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
        if (d32)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

        d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
        if (d32)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}

void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
{
        u32 i;

        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
                if (!(oct->io_qmask.iq & (1UL << i)))
                        continue;
                oct->fn_list.setup_iq_regs(oct, i);
        }

        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
                if (!(oct->io_qmask.oq & (1UL << i)))
                        continue;
                oct->fn_list.setup_oq_regs(oct, i);
        }

        oct->fn_list.setup_device_regs(oct);

        oct->fn_list.enable_interrupt(oct->chip);

        oct->fn_list.enable_io_queues(oct);

        /* for (i = 0; i < oct->num_oqs; i++) { */
        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
                if (!(oct->io_qmask.oq & (1UL << i)))
                        continue;
                writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
        }
}

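/* BAR1 index registers: each PCI_BAR1_INDEXx entry maps one window of
 * Octeon core memory into host BAR1. Since bits 17:4 of the index register
 * hold bits 35:22 of the core address (see the comment in
 * lio_cn6xxx_bar1_idx_setup() below), each window covers 2^22 bytes, i.e.
 * a 4 MB naturally-aligned region.
 */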
void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
                          u64 core_addr,
                          u32 idx,
                          int valid)
{
        u64 bar1;

        if (valid == 0) {
                bar1 = lio_pci_readq(oct,
                                     CN6XXX_BAR1_REG(idx, oct->pcie_port));
                lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
                               CN6XXX_BAR1_REG(idx, oct->pcie_port));
                bar1 = lio_pci_readq(oct,
                                     CN6XXX_BAR1_REG(idx, oct->pcie_port));
                return;
        }

        /* Bits 17:4 of the PCI_BAR1_INDEXx stores bits 35:22 of
         * the Core Addr
         */
        lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
                       CN6XXX_BAR1_REG(idx, oct->pcie_port));

        bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
                               u32 idx, u32 mask)
{
        lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
        return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

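/* Illustrative example of the rollover handling below: if the counter read
 * at init time (iq->reset_instr_cnt) was 0xFFFFFFF0 and the counter now
 * reads 0x10, the else-branch computes 0x10 + (0xFFFFFFFF - 0xFFFFFFF0) + 1
 * = 0x20, i.e. 32 instructions fetched since init; the final modulo by
 * iq->max_count turns that count into a ring index.
 */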
u32
lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
                             struct octeon_instr_queue *iq)
{
        u32 new_idx = readl(iq->inst_cnt_reg);

        /* The new instr cnt reg is a 32-bit counter that can roll over. We
         * have noted the counter's initial value at init time into
         * iq->reset_instr_cnt.
         */
        if (iq->reset_instr_cnt < new_idx)
                new_idx -= iq->reset_instr_cnt;
        else
                new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;

        /* Modulo of the new index with the IQ size will give us
         * the new index.
         */
        new_idx %= iq->max_count;

        return new_idx;
}

void lio_cn6xxx_enable_interrupt(void *chip)
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
        u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

        /* Enable Interrupt */
        writeq(mask, cn6xxx->intr_enb_reg64);
}

void lio_cn6xxx_disable_interrupt(void *chip)
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

        /* Disable Interrupts */
        writeq(0, cn6xxx->intr_enb_reg64);

        /* make sure interrupts are really disabled */
        mmiowb();
}

static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
        /* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
         * to determine the PCIE port #
         */
        oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

        dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}

void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
        dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
                CVM_CAST64(intr64));
}

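/* The handler below collects the set of output queues whose packet-count
 * or packet-timer interrupt has fired, records them in oct->droq_intr, and
 * for queues running in poll mode it masks that queue's interrupt sources
 * so the poll routine can drain the queue and re-enable them later
 * (assuming the poll path performs the re-enable).
 */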
int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
        struct octeon_droq *droq;
        u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
        u32 droq_cnt_enb, droq_cnt_mask;

        droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
        droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
        droq_mask = droq_cnt_mask & droq_cnt_enb;

        droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
        droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
        droq_mask |= (droq_time_mask & droq_int_enb);

        droq_mask &= oct->io_qmask.oq;

        oct->droq_intr = 0;

        /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
        for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
                if (!(droq_mask & (1 << oq_no)))
                        continue;

                droq = oct->droq[oq_no];
                pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
                if (pkt_count) {
                        oct->droq_intr |= (1ULL << oq_no);
                        if (droq->ops.poll_mode) {
                                u32 value;
                                u32 reg;

                                struct octeon_cn6xxx *cn6xxx =
                                        (struct octeon_cn6xxx *)oct->chip;

                                /* disable interrupts for this droq */
                                spin_lock
                                        (&cn6xxx->lock_for_droq_int_enb_reg);
                                reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
                                value = octeon_read_csr(oct, reg);
                                value &= ~(1 << oq_no);
                                octeon_write_csr(oct, reg, value);
                                reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
                                value = octeon_read_csr(oct, reg);
                                value &= ~(1 << oq_no);
                                octeon_write_csr(oct, reg, value);

                                /* Ensure that the enable register is written.
                                 */
                                mmiowb();

                                spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
                        }
                }
        }

        droq_time_mask &= oct->io_qmask.oq;
        droq_cnt_mask &= oct->io_qmask.oq;

        /* Reset the PKT_CNT/TIME_INT registers. */
        if (droq_time_mask)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

        if (droq_cnt_mask) /* reset PKT_CNT register:66xx */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

        return 0;
}

irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
        struct octeon_device *oct = (struct octeon_device *)dev;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
        u64 intr64;

        intr64 = readq(cn6xxx->intr_sum_reg64);

        /* If our device has interrupted, then proceed.
         * Also check for all f's if interrupt was triggered on an error
         * and the PCI read fails.
         */
        if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
                return IRQ_NONE;

        oct->int_status = 0;

        if (intr64 & CN6XXX_INTR_ERR)
                lio_cn6xxx_process_pcie_error_intr(oct, intr64);

        if (intr64 & CN6XXX_INTR_PKT_DATA) {
                lio_cn6xxx_process_droq_intr_regs(oct);
                oct->int_status |= OCT_DEV_INTR_PKT_DATA;
        }

        if (intr64 & CN6XXX_INTR_DMA0_FORCE)
                oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

        if (intr64 & CN6XXX_INTR_DMA1_FORCE)
                oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

        /* Clear the current interrupts */
        writeq(intr64, cn6xxx->intr_sum_reg64);

        return IRQ_HANDLED;
}

void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
                                  void *chip,
                                  struct octeon_reg_list *reg_list)
{
        u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

        reg_list->pci_win_wr_addr_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
        reg_list->pci_win_wr_addr_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
        reg_list->pci_win_wr_addr =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

        reg_list->pci_win_rd_addr_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
        reg_list->pci_win_rd_addr_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
        reg_list->pci_win_rd_addr =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

        reg_list->pci_win_wr_data_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
        reg_list->pci_win_wr_data_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
        reg_list->pci_win_wr_data =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

        reg_list->pci_win_rd_data_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
        reg_list->pci_win_rd_data_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
        reg_list->pci_win_rd_data =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

        lio_cn6xxx_get_pcie_qlmport(oct);

        cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
        cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
        cn6xxx->intr_enb_reg64 =
                bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}

int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        if (octeon_map_pci_barx(oct, 0, 0))
                return 1;

        if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
                dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
                        __func__);
                octeon_unmap_pci_barx(oct, 0);
                return 1;
        }

        spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

        oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
        oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

        oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
        oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
        oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
        oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

        oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
        oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
        oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

        oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
        oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
        oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

        oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
        oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

        lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

        cn6xxx->conf = (struct octeon_config *)
                       oct_get_config_info(oct, LIO_210SV);
        if (!cn6xxx->conf) {
                dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
                        __func__);
                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);
                return 1;
        }

        oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

        return 0;
}

int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
                                    struct octeon_config *conf6xxx)
{
        /* int total_instrs = 0; */

        if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
                dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
                        __func__, CFG_GET_IQ_MAX_Q(conf6xxx),
                        CN6XXX_MAX_INPUT_QUEUES);
                return 1;
        }

        if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
                dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
                        __func__, CFG_GET_OQ_MAX_Q(conf6xxx),
                        CN6XXX_MAX_OUTPUT_QUEUES);
                return 1;
        }

        if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
            CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
                        __func__);
                return 1;
        }

        if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
            !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
                        __func__);
                return 1;
        }

        if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
                dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",