/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>

#include "irq-gic-common.h"

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1ULL << 0)
#define ITS_FLAGS_WORKAROUND_CAVIUM_22375	(1ULL << 1)
#define ITS_FLAGS_WORKAROUND_CAVIUM_23144	(1ULL << 2)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS_BASER structure - contains memory information, cached
 * value of BASER register configuration and ITS page size.
 */
struct its_baser {
	void		*base;
	u64		val;
	u32		order;
	u32		psz;
};

/*
 * The ITS structure - contains most of the infrastructure, with the
 * top-level MSI domain, the command queue, the collections, and the
 * list of devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	void __iomem		*base;
	unsigned long		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	struct its_baser	tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct list_head	its_device_list;
	u64			flags;
	u32			ite_size;
	u32			device_ids;
	int			numa_node;
};

#define ITS_ITT_ALIGN		SZ_256

/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))

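/*
 * For illustration: with the usual 4K kernel pages,
 * PAGE_ORDER_TO_SIZE(0) is 4K and PAGE_ORDER_TO_SIZE(4) is 64K.
 */
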
struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
};

/*
 * The ITS view of a device - belongs to an ITS, a collection, owns an
 * interrupt translation table, and a list of interrupts.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapvi_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

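/*
 * Worked example of the sizing above: a command block is 4 x u64 =
 * 32 bytes, so the 64K queue holds 64K / 32 = 2048 entries.
 * GITS_CREADR and GITS_CWRITER hold byte offsets into the queue,
 * hence the conversions by sizeof(struct its_cmd_block) below.
 */
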
typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
						    struct its_cmd_desc *);

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	cmd->raw_cmd[0] &= ~0xffUL;
	cmd->raw_cmd[0] |= cmd_nr;
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
	cmd->raw_cmd[0] |= ((u64)devid) << 32;
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	cmd->raw_cmd[1] &= ~0xffffffffUL;
	cmd->raw_cmd[1] |= id;
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	cmd->raw_cmd[1] &= 0xffffffffUL;
	cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	cmd->raw_cmd[1] &= ~0x1fUL;
	cmd->raw_cmd[1] |= size & 0x1f;
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	cmd->raw_cmd[2] &= ~0xffffffffff00UL;
	cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	cmd->raw_cmd[2] &= ~(1UL << 63);
	cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
	cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	cmd->raw_cmd[2] &= ~0xffffUL;
	cmd->raw_cmd[2] |= col;
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

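/*
 * For illustration only (example values, assuming GITS_CMD_MAPC is
 * opcode 0x09 as in the GICv3 architecture): mapping collection 3 to
 * a target redistributor at 0x40000, valid, builds the following
 * block before the LE fixup:
 *
 *	raw_cmd[0] = 0x09
 *	raw_cmd[1] = 0
 *	raw_cmd[2] = (1ULL << 63) | 0x40000 | 3
 *	raw_cmd[3] = 0
 */
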
static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapvi_cmd.dev,
			       desc->its_mapvi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPVI);
	its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		__flush_dcache_area(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);
		if (rd_idx >= to_idx || rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

static void its_send_single_command(struct its_node *its,
				    its_cmd_builder_t builder,
				    struct its_cmd_desc *desc)
{
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
	struct its_collection *sync_col;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	cmd = its_allocate_entry(its);
	if (!cmd) {		/* We're soooooo screwed... */
		pr_err_ratelimited("ITS can't allocate, dropping command\n");
		raw_spin_unlock_irqrestore(&its->lock, flags);
		return;
	}
	sync_col = builder(cmd, desc);
	its_flush_cmd(its, cmd);

	if (sync_col) {
		sync_cmd = its_allocate_entry(its);
		if (!sync_cmd) {
			pr_err_ratelimited("ITS can't SYNC, skipping\n");
			goto post;
		}
		its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
		its_encode_target(sync_cmd, sync_col->target_address);
		its_fixup_cmd(sync_cmd);
		its_flush_cmd(its, sync_cmd);
	}

post:
	next_cmd = its_post_commands(its);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	its_wait_for_range_completion(its, cmd, next_cmd);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapvi_cmd.dev = dev;
	desc.its_mapvi_cmd.phys_id = irq_id;
	desc.its_mapvi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */

static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_set_config(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = d->hwirq;
	u32 id = its_get_event_id(d);
	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

	if (enable)
		*cfg |= LPI_PROP_ENABLED;
	else
		*cfg &= ~LPI_PROP_ENABLED;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		__flush_dcache_area(cfg, sizeof(*cfg));
	else
		dsb(ishst);
	its_send_inv(its_dev, id);
}

static void its_mask_irq(struct irq_data *d)
{
	lpi_set_config(d, false);
}

static void its_unmask_irq(struct irq_data *d)
{
	lpi_set_config(d, true);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	const struct cpumask *cpu_mask = cpu_online_mask;
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	/* An LPI cannot be routed to a redistributor on a foreign node */
	if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
		if (its_dev->its->numa_node >= 0) {
			cpu_mask = cpumask_of_node(its_dev->its->numa_node);
			if (!cpumask_intersects(mask_val, cpu_mask))
				return -EINVAL;
		}
	}

	cpu = cpumask_any_and(mask_val, cpu_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	target_col = &its_dev->its->collections[cpu];
	its_send_movi(its_dev, target_col, id);
	its_dev->event_map.col_map[id] = cpu;

	return IRQ_SET_MASK_OK_DONE;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->phys_base + GITS_TRANSLATER;

	msg->address_lo		= addr & ((1UL << 32) - 1);
	msg->address_hi		= addr >> 32;
	msg->data		= its_get_event_id(d);

	iommu_dma_map_msi_msg(d->irq, msg);
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs in chunks of 32, we can shift the whole thing by 5
 * bits (the number of bits needed to index a chunk).
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)

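/*
 * Worked example: with id_bits = 16, we get (1UL << 16) - 8192 =
 * 57344 usable LPIs, i.e. 57344 >> 5 = 1792 chunks of 32 interrupts.
 */
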
static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

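/*
 * Round-trip example: LPI 8256 lives in chunk (8256 - 8192) >> 5 = 2,
 * and chunk 2 maps back to base LPI (2 << 5) + 8192 = 8256.
 */
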
static int __init its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	if (!bitmap)
		*base = *nr_ids = 0;

	return bitmap;
}

static void its_lpi_free(struct event_lpi_map *map)
{
	int base = map->lpi_base;
	int nr_ids = map->nr_lpis;
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);

		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(map->lpi_map);
	kfree(map->col_map);
}

/*
 * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_PROPBASE_SZ		SZ_64K
#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)

/*
 * This is how many bits of ID we need, including the useless ones.
 */
#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)

#define LPI_PROP_DEFAULT_PRIO	0xa0

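/*
 * Worked out: 64K configuration bytes cover LPIs 8192-73727, so the
 * pending table needs 64K / 8 = 8K of LPI bits plus 1K for the 8192
 * SGI/PPI/SPI bits, i.e. LPI_PENDBASE_SZ = 9K (the per-CPU allocation
 * still rounds this up to 64K for alignment). Likewise LPI_NRBITS =
 * ilog2(64K + 8K) = 16.
 */
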
static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
					    get_order(LPI_PROPBASE_SZ));
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(gic_rdists->prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

	return 0;
}

static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
{
	u32 idx = baser - its->tables;

	return readq_relaxed(its->base + GITS_BASER + (idx << 3));
}

static void its_write_baser(struct its_node *its, struct its_baser *baser,
			    u64 val)
{
	u32 idx = baser - its->tables;

	writeq_relaxed(val, its->base + GITS_BASER + (idx << 3));
	baser->val = its_read_baser(its, baser);
}

static int its_setup_baser(struct its_node *its, struct its_baser *baser,
			   u64 cache, u64 shr, u32 psz, u32 order,
			   bool indirect)
{
	u64 val = its_read_baser(its, baser);
	u64 esz = GITS_BASER_ENTRY_SIZE(val);
	u64 type = GITS_BASER_TYPE(val);
	u32 alloc_pages;
	void *base;
	u64 tmp;

retry_alloc_baser:
	alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
	if (alloc_pages > GITS_BASER_PAGES_MAX) {
		pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
			&its->phys_base, its_base_type_string[type],
			alloc_pages, GITS_BASER_PAGES_MAX);
		alloc_pages = GITS_BASER_PAGES_MAX;
		order = get_order(GITS_BASER_PAGES_MAX * psz);
	}

	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!base)
		return -ENOMEM;

retry_baser:
	val = (virt_to_phys(base)				 |
		(type << GITS_BASER_TYPE_SHIFT)			 |
		((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)	 |
		((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)	 |
		cache						 |
		shr						 |
		GITS_BASER_VALID);

	val |= indirect ? GITS_BASER_INDIRECT : 0x0;

	switch (psz) {
	case SZ_4K:
		val |= GITS_BASER_PAGE_SIZE_4K;
		break;
	case SZ_16K:
		val |= GITS_BASER_PAGE_SIZE_16K;
		break;
	case SZ_64K:
		val |= GITS_BASER_PAGE_SIZE_64K;
		break;
	}

	its_write_baser(its, baser, val);
	tmp = baser->val;

	if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
		/*
		 * Shareability didn't stick. Just use
		 * whatever the read reported, which is likely
		 * to be the only thing this redistributor
		 * supports. If that's zero, make it
		 * non-cacheable as well.
		 */
		shr = tmp & GITS_BASER_SHAREABILITY_MASK;
		if (!shr) {
			cache = GITS_BASER_nC;
			__flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
		}
		goto retry_baser;
	}

	if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
		/*
		 * Page size didn't stick. Let's try a smaller
		 * size and retry. If we reach 4K, then
		 * something is horribly wrong...
		 */
		free_pages((unsigned long)base, order);
		baser->base = NULL;

		switch (psz) {
		case SZ_16K:
			psz = SZ_4K;
			goto retry_alloc_baser;
		case SZ_64K:
			psz = SZ_16K;
			goto retry_alloc_baser;
		}
	}

	if (val != tmp) {
		pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n",
		       &its->phys_base, its_base_type_string[type],
		       (unsigned long) val, (unsigned long) tmp);
		free_pages((unsigned long)base, order);
		return -ENXIO;
	}

	baser->order = order;
	baser->base = base;
	baser->psz = psz;
	tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;

	pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
		&its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp),
		its_base_type_string[type],
		(unsigned long)virt_to_phys(base),
		indirect ? "indirect" : "flat", (int)esz,
		psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);

	return 0;
}

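/*
 * Sizing example for the clamp above (values assumed): with
 * psz = SZ_4K and order = 9 (a 2MB allocation), alloc_pages would be
 * 2MB / 4K = 512, exceeding GITS_BASER_PAGES_MAX (256, the BASER
 * Size field being eight bits of "pages - 1"). The allocation is
 * then shrunk to 256 * 4K = 1MB, i.e. order = 8.
 */
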
static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser,
				   u32 psz, u32 *order)
{
	u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser));
	u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb;
	u32 ids = its->device_ids;
	u32 new_order = *order;
	bool indirect = false;

	/* No need to enable Indirection if memory requirement < (psz*2)bytes */
	if ((esz << ids) > (psz * 2)) {
		/*
		 * Find out whether hw supports a single or two-level table
		 * by reading bit at offset '62' after writing '1' to it.
		 */
		its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
		indirect = !!(baser->val & GITS_BASER_INDIRECT);

		if (indirect) {
			/*
			 * The size of the lvl2 table is equal to the ITS
			 * page size, which is 'psz'. For computing the lvl1
			 * table size, subtract the ID bits that a lvl2
			 * table covers from 'ids', and use the lvl1 table
			 * entry size.
			 */
			ids -= ilog2(psz / esz);
			esz = GITS_LVL1_ENTRY_SIZE;
		}
	}

	/*
	 * Allocate as many entries as required to fit the
	 * range of device IDs that the ITS can grok... The ID
	 * space being incredibly sparse, this results in a
	 * massive waste of memory if two-level device table
	 * feature is not supported by hardware.
	 */
	new_order = max_t(u32, get_order(esz << ids), new_order);
	if (new_order >= MAX_ORDER) {
		new_order = MAX_ORDER - 1;
		ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz);
		pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n",
			&its->phys_base, its->device_ids, ids);
	}

	*order = new_order;

	return indirect;
}

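/*
 * Worked example (values assumed): esz = 8, ids = 20, psz = SZ_64K.
 * A flat table would need 8 << 20 = 8MB. With indirection, one lvl2
 * page covers ilog2(64K / 8) = 13 DeviceID bits, leaving 7 bits for
 * lvl1: with 8-byte lvl1 entries that is a 8 << 7 = 1K lvl1 table,
 * lvl2 pages being allocated on demand by its_alloc_device_table().
 */
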
static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i].base) {
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
			its->tables[i].base = NULL;
		}
	}
}

static int its_alloc_tables(struct its_node *its)
{
	u64 typer = readq_relaxed(its->base + GITS_TYPER);
	u32 ids = GITS_TYPER_DEVBITS(typer);
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_WaWb;
	u32 psz = SZ_64K;
	int err, i;

	if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
		/*
		 * erratum 22375: only alloc 8MB table size
		 * erratum 24313: ignore memory access type
		 */
		cache = GITS_BASER_nCnB;
		ids = 0x14;		/* 20 bits, 8MB */
	}

	its->device_ids = ids;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		struct its_baser *baser = its->tables + i;
		u64 val = its_read_baser(its, baser);
		u64 type = GITS_BASER_TYPE(val);
		u32 order = get_order(psz);
		bool indirect = false;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		if (type == GITS_BASER_TYPE_DEVICE)
			indirect = its_parse_baser_device(its, baser, psz, &order);

		err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
		if (err < 0) {
			its_free_tables(its);
			return err;
		}

		/* Update settings which will be used for next BASERn */
		psz = baser->psz;
		cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
		shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
	}

	return 0;
}

static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}

static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;
		/*
		 * The pending pages have to be at least 64kB aligned,
		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
		 */
		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		/* Make sure the GIC will observe the zero-ed page */
		__flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_WaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	writeq_relaxed(val, rbase + GICR_PROPBASER);
	tmp = readq_relaxed(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			writeq_relaxed(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_WaWb);

	writeq_relaxed(val, rbase + GICR_PENDBASER);
	tmp = readq_relaxed(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		writeq_relaxed(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}

static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/* avoid cross node collections and its mapping */
		if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
			struct device_node *cpu_node;

			cpu_node = of_get_cpu_node(cpu, NULL);
			if (its->numa_node != NUMA_NO_NODE &&
			    its->numa_node != of_node_to_nid(cpu_node))
				continue;
		}

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/*
			 * This ITS wants a linear CPU number.
			 */
			target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}

static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

static struct its_baser *its_get_baser(struct its_node *its, u32 type)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (GITS_BASER_TYPE(its->tables[i].val) == type)
			return &its->tables[i];
	}

	return NULL;
}

static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
{
	struct its_baser *baser;
	struct page *page;
	u32 esz, idx;
	__le64 *table;

	baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);

	/* Don't allow device id that exceeds ITS hardware limit */
	if (!baser)
		return (ilog2(dev_id) < its->device_ids);

	/* Don't allow device id that exceeds single, flat table limit */
	esz = GITS_BASER_ENTRY_SIZE(baser->val);
	if (!(baser->val & GITS_BASER_INDIRECT))
		return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));

	/* Compute 1st level table index & check if that exceeds table limit */
	idx = dev_id >> ilog2(baser->psz / esz);
	if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
		return false;

	table = baser->base;

	/* Allocate memory for 2nd level table */
	if (!table[idx]) {
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
		if (!page)
			return false;

		/* Flush Lvl2 table to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			__flush_dcache_area(page_address(page), baser->psz);

		table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);

		/* Flush Lvl1 entry to PoC if hw doesn't support coherency */
		if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
			__flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE);

		/* Ensure updated table contents are visible to ITS hardware */
		dsb(sy);
	}

	return true;
}

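/*
 * Indexing example for the two-level walk above (same assumed
 * numbers: psz = SZ_64K, esz = 8): each lvl2 page covers
 * 64K / 8 = 8192 DeviceIDs, so dev_id 20000 selects lvl1 slot
 * 20000 >> 13 = 2, and its entry sits at offset (20000 % 8192) * esz
 * within that lvl2 page.
 */
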
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs)
{
	struct its_device *dev;
	unsigned long *lpi_map;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	if (!its_alloc_device_table(its, dev_id))
		return NULL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * At least one bit of EventID is being used, hence a minimum
	 * of two entries. No, the architecture doesn't let you
	 * express an ITT with a single entry.
	 */
	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
	itt = kzalloc(sz, GFP_KERNEL);
	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
	if (lpi_map)
		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

	if (!dev || !itt || !lpi_map || !col_map) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	__flush_dcache_area(itt, sz);

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}

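/*
 * ITT sizing example (assuming ite_size = 8): a request for 30
 * vectors rounds up to nr_ites = 32, giving sz = 32 * 8 = 256 bytes,
 * padded with ITS_ITT_ALIGN - 1 extra bytes so that the 256-byte
 * aligned address programmed by MAPD is guaranteed to fit within
 * the allocation.
 */
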
static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}

static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
	int idx;

	idx = find_first_zero_bit(dev->event_map.lpi_map,
				  dev->event_map.nr_lpis);
	if (idx == dev->event_map.nr_lpis)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}

static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct its_node *its;
	struct its_device *its_dev;
	struct msi_domain_info *msi_info;
	u32 dev_id;

	/*
	 * We ignore "dev" entirely, and rely on the dev_id that has
	 * been passed via the scratchpad. This limits this domain's
	 * usefulness to upper layers that definitely know that they
	 * are built on top of the ITS.
	 */
	dev_id = info->scratchpad[0].ul;

	msi_info = msi_get_domain_info(domain);
	its = msi_info->data;

	its_dev = its_find_device(its, dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		pr_debug("Reusing ITT for devID %x\n", dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_id, nvec);
	if (!its_dev)
		return -ENOMEM;

	pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
	info->scratchpad[0].ptr = its_dev;
	return 0;
}

static struct msi_domain_ops its_msi_domain_ops = {
	.msi_prepare	= its_msi_prepare,
};

static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;

	if (irq_domain_get_of_node(domain->parent)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = GIC_IRQ_TYPE_LPI;
		fwspec.param[1] = hwirq;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
}

static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		err = its_alloc_device_irq(its_dev, &hwirq);
		if (err)
			return err;

		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq, &its_irq_chip, its_dev);
		pr_debug("ID:%d pID:%d vID:%d\n",
			 (int)(hwirq - its_dev->event_map.lpi_base),
			 (int) hwirq, virq + i);
	}

	return 0;
}

static void its_irq_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);
	const struct cpumask *cpu_mask = cpu_online_mask;

	/* get the cpu_mask of local node */
	if (its_dev->its->numa_node >= 0)
		cpu_mask = cpumask_of_node(its_dev->its->numa_node);

	/* Bind the LPI to the first possible CPU */
	its_dev->event_map.col_map[event] = cpumask_first(cpu_mask);

	/* Map the GIC IRQ and event to the device */
	its_send_mapvi(its_dev, d->hwirq, event);
}

static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}

static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	/* If all interrupts have been freed, start mopping the floor */
	if (bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(&its_dev->event_map);

		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};

static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	/*
	 * GIC architecture specification requires the ITS to be both
	 * disabled and quiescent for writes to GITS_BASER<n> or
	 * GITS_CBASER to not have UNPREDICTABLE results.
	 */
	if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~GITS_CTLR_ENABLE;
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}

static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
}

static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
	struct its_node *its = data;

	its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
}

static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
	{
		.desc	= "ITS: Cavium errata 22375, 24313",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_22375,
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23144
	{
		.desc	= "ITS: Cavium erratum 23144",
		.iidr	= 0xa100034c,	/* ThunderX pass 1.x */
		.mask	= 0xffff0fff,
		.init	= its_enable_quirk_cavium_23144,
	},
#endif
	{
	}
};

static void its_enable_quirks(struct its_node *its)
{
	u32 iidr = readl_relaxed(its->base + GITS_IIDR);

	gic_enable_quirks(iidr, its_quirks, its);
}

static int __init its_probe(struct device_node *node,
			    struct irq_domain *parent)
{
	struct resource res;
	struct its_node *its;
	void __iomem *its_base;
	struct irq_domain *inner_domain;
	u32 val;
	u64 baser, tmp;
	int err;

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		pr_warn("%s: no regs?\n", node->full_name);
		return -ENXIO;
	}

	its_base = ioremap(res.start, resource_size(&res));
	if (!its_base) {
		pr_warn("%s: unable to map registers\n", node->full_name);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("%s: no ITS detected, giving up\n", node->full_name);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("%s: failed to quiesce, giving up\n",
			node->full_name);
		goto out_unmap;
	}

	pr_info("ITS: %s\n", node->full_name);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	its->base = its_base;
	its->phys_base = res.start;
	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
	its->numa_node = of_node_to_nid(node);

	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_WaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	writeq_relaxed(baser, its->base + GITS_CBASER);
	tmp = readq_relaxed(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			writeq_relaxed(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	writeq_relaxed(0, its->base + GITS_CWRITER);
	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);

	if (of_property_read_bool(node, "msi-controller")) {
		struct msi_domain_info *info;

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			err = -ENOMEM;
			goto out_free_tables;
		}

		inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
		if (!inner_domain) {
			err = -ENOMEM;
			kfree(info);
			goto out_free_tables;
		}

		inner_domain->parent = parent;
		inner_domain->bus_token = DOMAIN_BUS_NEXUS;
		info->ops = &its_msi_domain_ops;
		info->data = its;
		inner_domain->host_data = info;
	}

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	kfree(its->cmd_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		if (!gic_rdists_supports_plpis()) {
			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
			return -ENXIO;
		}
		its_cpu_init_lpis();
		its_cpu_init_collection();
	}

	return 0;
}

static struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

int __init its_init(struct device_node *node, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *np;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		its_probe(np, parent_domain);
	}

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	its_alloc_lpi_tables();
	its_lpi_init(rdists->id_bits);

	return 0;
}