/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"
#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)
/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};
/*
 * The ITS_BASER structure - contains memory information and cached
 * value of BASER register configuration.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
};
/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	unsigned long		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct list_head	its_device_list;
	u64			flags;
	u32			ite_size;
	u32			device_ids;
};
#define ITS_ITT_ALIGN		SZ_256

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
};
/*
 * The ITS view of a device - belongs to an ITS, a collection, owns an
 * interrupt translation table, and a list of interrupts.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};
static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
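/*
 * These accessors always resolve to the redistributor of the CPU
 * currently executing the code, which is why none of the callers
 * below ever pass a CPU number explicitly.
 */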
static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}
/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapvi_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;
	};
};
/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};
#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
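/* A command is 4 x u64 = 32 bytes, so the 64kB queue holds 2048 entries. */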
typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
						    struct its_cmd_desc *);
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	cmd->raw_cmd[0] &= ~0xffUL;
	cmd->raw_cmd[0] |= cmd_nr;
}
static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
	cmd->raw_cmd[0] |= ((u64)devid) << 32;
}
static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	cmd->raw_cmd[1] &= ~0xffffffffUL;
	cmd->raw_cmd[1] |= id;
}
static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	cmd->raw_cmd[1] &= 0xffffffffUL;
	cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}
static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	cmd->raw_cmd[1] &= ~0x1fUL;
	cmd->raw_cmd[1] |= size & 0x1f;
}
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	cmd->raw_cmd[2] &= ~0xffffffffffffUL;
	cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	cmd->raw_cmd[2] &= ~(1UL << 63);
	cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
	cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	cmd->raw_cmd[2] &= ~0xffffUL;
	cmd->raw_cmd[2] |= col;
}
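/*
 * Putting the helpers above together, the fields of a command block
 * end up laid out as follows:
 *
 *	raw_cmd[0]: command number in bits [7:0], DeviceID in [63:32]
 *	raw_cmd[1]: EventID in [31:0], physical LPI number in [63:32],
 *	            or ITT size in [4:0] (MAPD)
 *	raw_cmd[2]: collection ID in [15:0], ITT address in [47:8] or
 *	            target redistributor in [47:16], valid bit in [63]
 *	raw_cmd[3]: unused by the commands below
 */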
static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
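/*
 * The ITS consumes its command queue in little-endian format whatever
 * the CPU endianness, so the swab above is only a real operation on a
 * big-endian kernel; on a little-endian one, cpu_to_le64() compiles
 * away.
 */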
static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}
static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}
static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapvi_cmd.dev,
			       desc->its_mapvi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPVI);
	its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}
static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}
static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}
static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}
static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}
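/*
 * Note that its_build_invall_cmd() fishes the collection out of
 * desc->its_mapc_cmd rather than desc->its_invall_cmd; both live in
 * the same union with 'col' as their first member, so the aliasing is
 * harmless, if a little surprising.
 */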
static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}
static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}
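/*
 * The queue counts as full when the slot about to be written is the
 * one the ITS is still reading: one entry is deliberately kept unused
 * so that a full ring and an empty ring don't both read back as
 * widx == ridx.
 */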
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	return cmd;
}
static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}
static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		__flush_dcache_area(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}
static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);
		if (rd_idx >= to_idx || rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
static void its_send_single_command(struct its_node *its,
				    its_cmd_builder_t builder,
				    struct its_cmd_desc *desc)
{
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
	struct its_collection *sync_col;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	cmd = its_allocate_entry(its);
	if (!cmd) {		/* We're soooooo screwed... */
		pr_err_ratelimited("ITS can't allocate, dropping command\n");
		raw_spin_unlock_irqrestore(&its->lock, flags);
		return;
	}
	sync_col = builder(cmd, desc);
	its_flush_cmd(its, cmd);

	if (sync_col) {
		sync_cmd = its_allocate_entry(its);
		if (!sync_cmd) {
			pr_err_ratelimited("ITS can't SYNC, skipping\n");
			goto post;
		}
		its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
		its_encode_target(sync_cmd, sync_col->target_address);
		its_fixup_cmd(sync_cmd);
		its_flush_cmd(its, sync_cmd);
	}

post:
	next_cmd = its_post_commands(its);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	its_wait_for_range_completion(its, cmd, next_cmd);
}
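/*
 * In short: take the command lock, write the command (plus a SYNC
 * targeting the same collection, so we can tell when the ITS has
 * actually processed it), bump GITS_CWRITER, then spin until
 * GITS_CREADR has moved past everything we queued.
 */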
static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}
static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}
static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}
static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapvi_cmd.dev = dev;
	desc.its_mapvi_cmd.phys_id = irq_id;
	desc.its_mapvi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
}
static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}
static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}
static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}
/*
 * irqchip functions - assumes MSI, mostly.
 */
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}
static void lpi_set_config(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = d->hwirq;
	u32 id = its_get_event_id(d);
	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

	if (enable)
		*cfg |= LPI_PROP_ENABLED;
	else
		*cfg &= ~LPI_PROP_ENABLED;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		__flush_dcache_area(cfg, sizeof(*cfg));
	else
		dsb(ishst);
	its_send_inv(its_dev, id);
}
static void its_mask_irq(struct irq_data *d)
{
	lpi_set_config(d, false);
}
static void its_unmask_irq(struct irq_data *d)
{
	lpi_set_config(d, true);
}
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	target_col = &its_dev->its->collections[cpu];
	its_send_movi(its_dev, target_col, id);
	its_dev->event_map.col_map[id] = cpu;

	return IRQ_SET_MASK_OK_DONE;
}
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->phys_base + GITS_TRANSLATER;

	msg->address_lo		= addr & ((1UL << 32) - 1);
	msg->address_hi		= addr >> 32;
	msg->data		= its_get_event_id(d);
}
static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
};
/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits to get the number of chunks.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
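/*
 * For example, id_bits == 16 yields ((1UL << 16) - 8192) >> 5 == 1792
 * allocatable chunks, i.e. interrupt IDs 8192-65535 handed out 32 at
 * a time.
 */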
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);
static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}
static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}
static int __init its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}
static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	return bitmap;
}
static void its_lpi_free(struct event_lpi_map *map)
{
	int base = map->lpi_base;
	int nr_ids = map->nr_lpis;
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);
		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(map->lpi_map);
	kfree(map->col_map);
}
/*
 * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_PROPBASE_SZ		SZ_64K
#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)

/*
 * This is how many bits of ID we need, including the useless ones.
 */
#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)

#define LPI_PROP_DEFAULT_PRIO	0xa0
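/*
 * Worked out: a 64kB property table covers interrupt IDs up to
 * 8192 + 65536 - 1, so LPI_NRBITS is ilog2(SZ_64K + SZ_8K) == 16.
 * The pending table needs one bit per ID: 1kB for the 8192 IDs below
 * the LPI range plus 8kB for the 64K LPIs, hence LPI_PENDBASE_SZ.
 */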
static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
					    get_order(LPI_PROPBASE_SZ));
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(gic_rdists->prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

	return 0;
}
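/*
 * Each LPI owns one configuration byte in this table: the enable bit
 * lives in bit 0, Group-1 in bit 1, and the priority in bits [7:2],
 * which is why the memset above can initialise every LPI to
 * "priority 0xa0, Group-1, disabled" in one go.
 */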
static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};
static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}
static int its_alloc_tables(const char *node_name, struct its_node *its)
{
	int err;
	int i;
	int psz = SZ_64K;
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache;
	u64 typer;
	u32 ids;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
		/*
		 * erratum 22375: only alloc 8MB table size
		 * erratum 24313: ignore memory access type
		 */
		cache	= 0;
		ids	= 0x14;			/* 20 bits, 8MB */
	} else {
		cache	= GITS_BASER_WaWb;
		typer	= readq_relaxed(its->base + GITS_TYPER);
		ids	= GITS_TYPER_DEVBITS(typer);
	}

	its->device_ids = ids;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
		u64 type = GITS_BASER_TYPE(val);
		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
		int order = get_order(psz);
		int alloc_pages;
		u64 tmp;
		void *base;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		/*
		 * Allocate as many entries as required to fit the
		 * range of device IDs that the ITS can grok... The ID
		 * space being incredibly sparse, this results in a
		 * massive waste of memory.
		 *
		 * For other tables, only allocate a single page.
		 */
		if (type == GITS_BASER_TYPE_DEVICE) {
			/*
			 * 'order' was initialized earlier to the default page
			 * granule of the ITS. We can't have an allocation
			 * smaller than that. If the requested allocation
			 * is smaller, round up to the default page granule.
			 */
			order = max(get_order((1UL << ids) * entry_size),
				    order);
			if (order >= MAX_ORDER) {
				order = MAX_ORDER - 1;
				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
					node_name, order);
			}
		}

retry_alloc_baser:
		alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
		if (alloc_pages > GITS_BASER_PAGES_MAX) {
			alloc_pages = GITS_BASER_PAGES_MAX;
			order = get_order(GITS_BASER_PAGES_MAX * psz);
			pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n",
				node_name, order, alloc_pages);
		}

		base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (!base) {
			err = -ENOMEM;
			goto out_free;
		}

		its->tables[i].base = base;
		its->tables[i].order = order;

retry_baser:
		val = (virt_to_phys(base)				 |
		       (type << GITS_BASER_TYPE_SHIFT)			 |
		       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
		       cache						 |
		       shr						 |
		       GITS_BASER_VALID);

		switch (psz) {
		case SZ_4K:
			val |= GITS_BASER_PAGE_SIZE_4K;
			break;
		case SZ_16K:
			val |= GITS_BASER_PAGE_SIZE_16K;
			break;
		case SZ_64K:
			val |= GITS_BASER_PAGE_SIZE_64K;
			break;
		}

		val |= alloc_pages - 1;
		its->tables[i].val = val;

		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);

		if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
			/*
			 * Shareability didn't stick. Just use
			 * whatever the read reported, which is likely
			 * to be the only thing this redistributor
			 * supports. If that's zero, make it
			 * non-cacheable as well.
			 */
			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
			if (!shr) {
				cache = GITS_BASER_nC;
				__flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
			}
			goto retry_baser;
		}

		if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
			/*
			 * Page size didn't stick. Let's try a smaller
			 * size and retry. If we reach 4K, then
			 * something is horribly wrong...
			 */
			free_pages((unsigned long)base, order);
			its->tables[i].base = NULL;

			switch (psz) {
			case SZ_16K:
				psz = SZ_4K;
				goto retry_alloc_baser;
			case SZ_64K:
				psz = SZ_16K;
				goto retry_alloc_baser;
			}
		}

		if (val != tmp) {
			pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
			       node_name, i,
			       (unsigned long) val, (unsigned long) tmp);
			err = -ENXIO;
			goto out_free;
		}

		pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
			(int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
			its_base_type_string[type],
			(unsigned long)virt_to_phys(base),
			psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
	}

	return 0;

out_free:
	its_free_tables(its);

	return err;
}
static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}
static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;
		/*
		 * The pending pages have to be at least 64kB aligned,
		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
		 */
		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		/* Make sure the GIC will observe the zero-ed page */
		__flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_WaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	writeq_relaxed(val, rbase + GICR_PROPBASER);
	tmp = readq_relaxed(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			writeq_relaxed(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_WaWb);

	writeq_relaxed(val, rbase + GICR_PENDBASER);
	tmp = readq_relaxed(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		writeq_relaxed(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}
static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/*
			 * This ITS wants a linear CPU number.
			 */
			target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}
static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}
static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];
	}

	return NULL;
}
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs)
{
	struct its_baser *baser;
	struct its_device *dev;
	unsigned long *lpi_map;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

	/* Don't allow 'dev_id' that exceeds single, flat table limit */
	if (baser) {
		if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
			       GITS_BASER_ENTRY_SIZE(baser->val)))
			return NULL;
	} else if (ilog2(dev_id) >= its->device_ids)
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * At least one bit of EventID is being used, hence a minimum
	 * of two entries. No, the architecture doesn't let you
	 * express an ITT with a single entry.
	 */
	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc(sz, GFP_KERNEL);
	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
	if (lpi_map)
		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

	if (!dev || !itt || !lpi_map || !col_map) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	__flush_dcache_area(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}
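/*
 * Sizing example: a device asking for 30 vectors gets nr_ites rounded
 * up to 32; with (say) 8-byte ITT entries that is 256 bytes, then
 * over-allocated by ITS_ITT_ALIGN - 1 so that the 256-byte-aligned
 * address handed to MAPD is guaranteed to fall inside the allocation.
 */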
static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}
static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
	int idx;

	idx = find_first_zero_bit(dev->event_map.lpi_map,
				  dev->event_map.nr_lpis);
	if (idx == dev->event_map.nr_lpis)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}
static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec);
	if (!its_dev)
		return -ENOMEM;

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	info->scratchpad[0].ptr = its_dev;
	return 0;
}
static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};
static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;

	if (irq_domain_get_of_node(domain->parent)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
		fwspec.param[1] = hwirq;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}
static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		err = its_alloc_device_irq(its_dev, &hwirq);
		if (err)
			return err;

		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq, &its_irq_chip, its_dev);
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq - its_dev->event_map.lpi_base),
			 (int) hwirq, virq + i);
	}

	return 0;
}
static void its_irq_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Bind the LPI to the first possible CPU */
	its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);

	/* Map the GIC IRQ and event to the device */
	its_send_mapvi(its_dev, d->hwirq, event);
}
static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}
static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	/* If all interrupts have been freed, start mopping the floor */
	if (bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(&its_dev->event_map);

		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};
static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	if (val & GITS_CTLR_QUIESCENT)
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~GITS_CTLR_ENABLE;
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}
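/*
 * The 1000000-iteration budget above, with udelay(1) per spin, is
 * where the "1s" in the comment comes from: roughly one second before
 * we give up on the ITS draining its pipeline.
 */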
static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
	{
	}
};
static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}
static int __init its_probe(struct device_node *node,
			    struct irq_domain *parent)
{
	struct resource res;
	struct its_node *its;
	void __iomem *its_base;
	struct irq_domain *inner_domain;
	u32 val;
	u64 baser, tmp;
	int err;

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		pr_warn("%s: no regs?\n", node->full_name);
		return -ENXIO;
	}

	its_base = ioremap(res.start, resource_size(&res));
	if (!its_base) {
		pr_warn("%s: unable to map registers\n", node->full_name);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("%s: no ITS detected, giving up\n", node->full_name);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("%s: failed to quiesce, giving up\n",
			node->full_name);
		goto out_unmap;
	}

	pr_info("ITS: %s\n", node->full_name);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	its->base = its_base;
	its->phys_base = res.start;
	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;

	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;

	its_enable_quirks(its);

	err = its_alloc_tables(node->full_name, its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_WaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	writeq_relaxed(baser, its->base + GITS_CBASER);
	tmp = readq_relaxed(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			writeq_relaxed(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	writeq_relaxed(0, its->base + GITS_CWRITER);
	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);

	if (of_property_read_bool(node, "msi-controller")) {
		struct msi_domain_info *info;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			err = -ENOMEM;
			goto out_free_tables;
		}

		inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
		if (!inner_domain) {
			err = -ENOMEM;
			kfree(info);
			goto out_free_tables;
		}

		inner_domain->parent = parent;
		inner_domain->bus_token = DOMAIN_BUS_NEXUS;
		info->ops = &its_msi_domain_ops;
		info->data = its;
		inner_domain->host_data = info;
	}

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	kfree(its->cmd_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
	return err;
}
static bool gic_rdists_supports_plpis(void)
{
	return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		if (!gic_rdists_supports_plpis()) {
			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
			return -ENXIO;
		}
		its_cpu_init_lpis();
		its_cpu_init_collection();
	}

	return 0;
}
static struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};
int __init its_init(struct device_node *node, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *np;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		its_probe(np, parent_domain);
	}

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	its_alloc_lpi_tables();
	its_lpi_init(rdists->id_bits);

	return 0;
}