/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irqflags-arcv2.h>
#include <asm/mcip.h>
#include <asm/setup.h>

#define IPI_IRQ		19
#define SOFTIRQ_IRQ	21

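/*
 * IPI_IRQ is the ARConnect generated cross-core interrupt; SOFTIRQ_IRQ is a
 * core-local software triggered interrupt used to "self-IPI", since
 * ARConnect can only raise interrupts on other cores (see mcip_ipi_send()).
 */
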
static char smp_cpuinfo_buf[128];
static int idu_detected;

static DEFINE_RAW_SPINLOCK(mcip_lock);

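/*
 * All ARConnect programming goes through __mcip_cmd()/__mcip_cmd_data()
 * (asm/mcip.h): a command + parameter is written to the ARConnect CMD aux
 * register and any result is read back from ARC_REG_MCIP_READBACK. That
 * multi-register sequence is not atomic, so mcip_lock serializes every
 * command/readback pair.
 */
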
static void mcip_setup_per_cpu(int cpu)
{
	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}

static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send IPI to others */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);
	/*
	 * If receiver already has a pending interrupt, elide sending this one.
	 * Linux cross core calling works well with concurrent IPIs
	 * coalesced into one.
	 * see arch/arc/kernel/smp.c: ipi_send_msg_one()
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
	if (ipi_was_pending)
		pr_info("IPI ACK delayed from cpu %d\n", cpu);
#endif
}

static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;
	unsigned int __maybe_unused copy;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
	copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In rare case, multiple concurrent IPIs sent to same target can
	 * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) as opposed to typical single bit
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

#ifdef CONFIG_ARC_IPI_DBG
	if (c != __ffs(copy))
		pr_info("IPIs from %x coalesced to %x\n",
			copy, raw_smp_processor_id());
#endif
}

static void mcip_probe_n_setup(void)
{
	struct mcip_bcr {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad3:8,
			     idu:1, llm:1, num_cores:6,
			     iocoh:1, gfrc:1, dbg:1, pad2:1,
			     msg:1, sem:1, ipi:1, pad:1,
			     ver:8;
#else
		unsigned int ver:8,
			     pad:1, ipi:1, sem:1, msg:1,
			     pad2:1, dbg:1, gfrc:1, iocoh:1,
			     num_cores:6, llm:1, idu:1,
			     pad3:8;
#endif
	} mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.llm, "LLM "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));

	idu_detected = mp.idu;

	if (mp.dbg) {
		__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, 0xf);
		__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
		panic("kernel trying to use non-existent GFRC\n");
}

struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
};

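/*
 * These ops are consumed by the generic ARC SMP code (arch/arc/kernel/smp.c),
 * which invokes ipi_send()/ipi_clear() from its cross-core messaging path
 * (ipi_send_msg_one(), referenced in mcip_ipi_send() above).
 */
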
/***************************************************************************
 *			ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *
 * It physically resides in the MCIP hw block
 */

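/*
 * The IDU is modelled as a cascaded irqchip: each common IRQ is wired to a
 * core-intc "uplink" (24, 25, ...) chained to idu_cascade_isr(), and a linear
 * irq_domain maps common IRQ numbers onto those uplinks (see idu_of_init()
 * below).
 */
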
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

static void idu_set_mode(unsigned int cmn_irq, unsigned int lvl,
			 unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.distr = distr;
	data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

static void idu_irq_mask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

#ifdef CONFIG_SMP
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;

	/* errout if no online cpu per @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	idu_set_dest(data->hwirq, cpumask_bits(&online)[0]);
	idu_set_mode(data->hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
};

static int idu_first_irq;

static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	unsigned int core_irq = irq_desc_get_irq(desc);
	unsigned int idu_irq;

	/* Map the core intc uplink irq back to the IDU common irq number */
	idu_irq = core_irq - idu_first_irq;
	generic_handle_irq(irq_find_mapping(domain, idu_irq));
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
			 const u32 *intspec, unsigned int intsize,
			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
	int distri = intspec[1];
	unsigned long flags;

	*out_type = IRQ_TYPE_NONE;

	/* XXX: validate distribution scheme against online cpu mask */

	if (distri == 0) {
		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
		raw_spin_lock_irqsave(&mcip_lock, flags);
		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	} else {
		/*
		 * DEST based distribution for Level Triggered intr can only
		 * have 1 CPU, so generalize it to always contain 1 cpu
		 */
		int cpu = ffs(distri);

		if (cpu != fls(distri))
			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
				hwirq, cpu);

		raw_spin_lock_irqsave(&mcip_lock, flags);
		idu_set_dest(hwirq, cpu);
		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	}

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate	= idu_irq_xlate,
	.map	= idu_irq_map,
};

/*
 * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */

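/*
 * Illustrative (not normative) DeviceTree usage, matching idu_irq_xlate()
 * above: the IDU node lists its core-intc uplink irqs, and consumers use a
 * 2-cell specifier <common-irq distribution>, where a second cell of 0
 * selects Round Robin across online cpus and a non-zero value is treated as
 * a cpu bitmask (first set bit becomes the destination). Node names below
 * are examples only.
 *
 *	idu_intc: idu-interrupt-controller {
 *		compatible = "snps,archs-idu-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		interrupt-parent = <&core_intc>;
 *		interrupts = <24 25 26 27>;
 *	};
 */
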
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	/* Read IDU BCR to confirm nr_irqs */
	int nr_irqs = of_irq_count(intc);
	int i, irq;

	if (!idu_detected)
		panic("IDU not detected, but DeviceTree using it");

	pr_info("MCIP: IDU referenced from Devicetree %d irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */
	for (i = 0; i < nr_irqs; i++) {
		/*
		 * Return parent uplink IRQs (towards core intc) 24,25,.....
		 * this step has been done before already
		 * however we need it to get the parent virq and set IDU handler
		 * as first level isr
		 */
		irq = irq_of_parse_and_map(intc, i);
		if (!i)
			idu_first_irq = irq;

		irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);