1 /*
2  * Copyright 2001 MontaVista Software Inc.
3  * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
4  *
5  * Copyright (C) 2001 Ralf Baechle
6  * Copyright (C) 2005  MIPS Technologies, Inc.	All rights reserved.
7  *	Author: Maciej W. Rozycki <macro@mips.com>
8  *
9  * This file define the irq handler for MIPS CPU interrupts.
10  *
11  * This program is free software; you can redistribute	it and/or modify it
12  * under  the terms of	the GNU General	 Public License as published by the
13  * Free Software Foundation;  either version 2 of the  License, or (at your
14  * option) any later version.
15  */
16 
17 /*
18  * Almost all MIPS CPUs define 8 interrupt sources.  They are typically
19  * level triggered (i.e., cannot be cleared from CPU; must be cleared from
20  * device).
21  *
22  * The first two are software interrupts (i.e. not exposed as pins) which
23  * may be used for IPIs in multi-threaded single-core systems.
24  *
25  * The last one is usually the CPU timer interrupt if the counter register
26  * is present, or for old CPUs with an external FPU by convention it's the
27  * FPU exception interrupt.
28  */
29 #include <linux/init.h>
30 #include <linux/interrupt.h>
31 #include <linux/kernel.h>
32 #include <linux/irq.h>
33 #include <linux/irqchip.h>
34 #include <linux/irqdomain.h>
35 
36 #include <asm/irq_cpu.h>
37 #include <asm/mipsregs.h>
38 #include <asm/mipsmtregs.h>
39 #include <asm/setup.h>
40 
/* Legacy linear domain covering the 8 CPU interrupt lines (hwirq 0-7). */
static struct irq_domain *irq_domain;
/* Hierarchical child of irq_domain carrying SW0/SW1 IPIs (MT only). */
static struct irq_domain *ipi_domain;
43 
/*
 * Enable the CPU interrupt line for @d by setting the matching IM bit
 * in the CP0 Status register.  The hazard barrier must follow the
 * Status write so the new mask takes effect before we return.
 */
static inline void unmask_mips_irq(struct irq_data *d)
{
	set_c0_status(IE_SW0 << d->hwirq);
	irq_enable_hazard();
}
49 
/*
 * Disable the CPU interrupt line for @d by clearing the matching IM
 * bit in the CP0 Status register, then wait out the CP0 hazard so the
 * line is really masked on return.
 */
static inline void mask_mips_irq(struct irq_data *d)
{
	clear_c0_status(IE_SW0 << d->hwirq);
	irq_disable_hazard();
}
55 
/*
 * irq_chip for plain (non-MT) CPU interrupt lines.  The lines are
 * level triggered, so ack/mask/eoi all reduce to masking or unmasking
 * the IM bit; there is no per-interrupt latch to clear in hardware.
 */
static struct irq_chip mips_cpu_irq_controller = {
	.name		= "MIPS",
	.irq_ack	= mask_mips_irq,
	.irq_mask	= mask_mips_irq,
	.irq_mask_ack	= mask_mips_irq,
	.irq_unmask	= unmask_mips_irq,
	.irq_eoi	= unmask_mips_irq,
	.irq_disable	= mask_mips_irq,
	.irq_enable	= unmask_mips_irq,
};
66 
67 /*
68  * Basically the same as above but taking care of all the MT stuff
69  */
70 
/*
 * Startup for the MT variant: clear any latched software interrupt in
 * CP0 Cause before unmasking, so a stale SW0/SW1 request does not fire
 * as soon as the line is enabled.  dvpe()/evpe() quiesce the other
 * VPEs while Cause is modified.  Always returns 0 (success).
 */
static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
	unsigned int vpflags = dvpe();

	clear_c0_cause(C_SW0 << d->hwirq);
	evpe(vpflags);
	unmask_mips_irq(d);
	return 0;
}
80 
81 /*
82  * While we ack the interrupt interrupts are disabled and thus we don't need
83  * to deal with concurrency issues.  Same for mips_cpu_irq_end.
84  */
/*
 * Ack for the MT variant: drop the software interrupt request bit in
 * CP0 Cause (bracketed by dvpe()/evpe() to hold the other VPEs), then
 * mask the line.  Runs with interrupts disabled, so no extra locking
 * is needed (see the concurrency comment above this function's
 * original location).
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
	unsigned int vpflags = dvpe();
	clear_c0_cause(C_SW0 << d->hwirq);
	evpe(vpflags);
	mask_mips_irq(d);
}
92 
93 #ifdef CONFIG_GENERIC_IRQ_IPI
94 
/*
 * Raise a software interrupt IPI on @cpu by setting the SW cause bit
 * in the target VPE's CP0 Cause register.  The dvpe()/settc()/evpe()
 * sequence must run with local interrupts off and must not be
 * reordered: the target TC is selected while all VPEs are halted.
 * Only VPEs on the local core can be targeted (WARN otherwise).
 */
static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	int vpflags;

	local_irq_save(flags);

	/* We can only send IPIs to VPEs within the local core */
	WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));

	vpflags = dvpe();
	settc(cpu_vpe_id(&cpu_data[cpu]));
	write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
	evpe(vpflags);

	local_irq_restore(flags);
}
113 
114 #endif /* CONFIG_GENERIC_IRQ_IPI */
115 
/*
 * irq_chip for the two software interrupt lines on MT-capable CPUs.
 * Unlike hardware lines, software interrupts are latched in CP0 Cause,
 * so ack must clear the Cause bit (mips_mt_cpu_irq_ack) rather than
 * just masking.  When IPI support is built in these lines also carry
 * cross-VPE IPIs via .ipi_send_single.
 */
static struct irq_chip mips_mt_cpu_irq_controller = {
	.name		= "MIPS",
	.irq_startup	= mips_mt_cpu_irq_startup,
	.irq_ack	= mips_mt_cpu_irq_ack,
	.irq_mask	= mask_mips_irq,
	.irq_mask_ack	= mips_mt_cpu_irq_ack,
	.irq_unmask	= unmask_mips_irq,
	.irq_eoi	= unmask_mips_irq,
	.irq_disable	= mask_mips_irq,
	.irq_enable	= unmask_mips_irq,
#ifdef CONFIG_GENERIC_IRQ_IPI
	.ipi_send_single = mips_mt_send_ipi,
#endif
};
130 
/*
 * Default interrupt dispatch, overridable by platform code (__weak).
 * Services the highest-numbered pending-and-unmasked CPU interrupt
 * first — line 7 (usually the timer) has the highest priority.
 * Hardware lines resolve through the main CPU domain; the software
 * lines (0 and 1) go to the IPI domain when IPI support is built in.
 */
asmlinkage void __weak plat_irq_dispatch(void)
{
	/* Only lines both asserted (Cause.IP) and enabled (Status.IM) */
	unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
	unsigned int virq;
	int irq;

	if (!pending) {
		spurious_interrupt();
		return;
	}

	/* Shift the IP field down so bit n corresponds to hwirq n */
	pending >>= CAUSEB_IP;
	while (pending) {
		irq = fls(pending) - 1;	/* highest pending line first */
		if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
			virq = irq_linear_revmap(ipi_domain, irq);
		else
			virq = irq_linear_revmap(irq_domain, irq);
		do_IRQ(virq);
		pending &= ~BIT(irq);
	}
}
153 
/*
 * irq_domain .map callback: wire up virq @irq for CPU line @hw.
 * The two software interrupt lines get the MT-aware chip on CPUs
 * implementing the MT ASE (they carry IPIs there); all other lines
 * use the plain CPU chip.  With vectored interrupts available, each
 * line's vector is pointed at plat_irq_dispatch.  Always returns 0.
 */
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
			     irq_hw_number_t hw)
{
	/* Software interrupts are used for MT/CMT IPI */
	struct irq_chip *chip = (hw < 2 && cpu_has_mipsmt) ?
				&mips_mt_cpu_irq_controller :
				&mips_cpu_irq_controller;

	if (cpu_has_vint)
		set_vi_handler(hw, plat_irq_dispatch);

	irq_set_chip_and_handler(irq, chip, handle_percpu_irq);

	return 0;
}
173 
/* Domain ops for the main CPU interrupt domain (one-cell DT specifier). */
static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
	.map = mips_cpu_intc_map,
	.xlate = irq_domain_xlate_onecell,
};
178 
179 #ifdef CONFIG_GENERIC_IRQ_IPI
180 
/* IPI domain bookkeeping: tracks which of the two SW hwirqs are in use. */
struct cpu_ipi_domain_state {
	DECLARE_BITMAP(allocated, 2);
};
184 
/*
 * Allocate @nr_irqs IPI virqs starting at @virq, claiming one of the
 * two software-interrupt hwirqs for each.  The MT chip is installed at
 * both the IPI level and the parent CPU-domain level so mask/ack reach
 * the hardware.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): on the error paths the hwirq bit set in
 * state->allocated is not cleared, and no .free callback exists to
 * release bits — presumably acceptable because IPIs are allocated once
 * at boot and never freed; confirm before reusing this domain
 * dynamically.
 */
static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct cpu_ipi_domain_state *state = domain->host_data;
	unsigned int i, hwirq;
	int ret;

	for (i = 0; i < nr_irqs; i++) {
		/* Claim the first free software-interrupt line (0 or 1) */
		hwirq = find_first_zero_bit(state->allocated, 2);
		if (hwirq == 2)
			return -EBUSY;
		bitmap_set(state->allocated, hwirq, 1);

		ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
						    &mips_mt_cpu_irq_controller,
						    NULL);
		if (ret)
			return ret;

		/* Mirror the mapping into the parent CPU domain */
		ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
						    &mips_mt_cpu_irq_controller,
						    NULL);

		if (ret)
			return ret;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
		if (ret)
			return ret;
	}

	return 0;
}
218 
mips_cpu_ipi_match(struct irq_domain * d,struct device_node * node,enum irq_domain_bus_token bus_token)219 static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
220 			      enum irq_domain_bus_token bus_token)
221 {
222 	bool is_ipi;
223 
224 	switch (bus_token) {
225 	case DOMAIN_BUS_IPI:
226 		is_ipi = d->bus_token == bus_token;
227 		return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
228 	default:
229 		return 0;
230 	}
231 }
232 
/* Domain ops for the hierarchical IPI domain (alloc + bus matching). */
static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
	.alloc	= mips_cpu_ipi_alloc,
	.match	= mips_cpu_ipi_match,
};
237 
mips_cpu_register_ipi_domain(struct device_node * of_node)238 static void mips_cpu_register_ipi_domain(struct device_node *of_node)
239 {
240 	struct cpu_ipi_domain_state *ipi_domain_state;
241 
242 	ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
243 	ipi_domain = irq_domain_add_hierarchy(irq_domain,
244 					      IRQ_DOMAIN_FLAG_IPI_SINGLE,
245 					      2, of_node,
246 					      &mips_cpu_ipi_chip_ops,
247 					      ipi_domain_state);
248 	if (!ipi_domain)
249 		panic("Failed to add MIPS CPU IPI domain");
250 	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
251 }
252 
253 #else /* !CONFIG_GENERIC_IRQ_IPI */
254 
/* IPI support not configured: registering an IPI domain is a no-op. */
static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}
256 
257 #endif /* !CONFIG_GENERIC_IRQ_IPI */
258 
/*
 * Common init for both the board-code and DT entry points.  Masks and
 * clears all CPU interrupt lines first, then registers the 8-line
 * legacy domain at MIPS_CPU_IRQ_BASE, and finally the IPI domain on
 * MT-capable CPUs.  @of_node may be NULL (non-DT boot).
 */
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
					   &mips_cpu_intc_irq_domain_ops,
					   NULL);
	if (!irq_domain)
		panic("Failed to add irqdomain for MIPS CPU");

	/*
	 * Only proceed to register the software interrupt IPI implementation
	 * for CPUs which implement the MIPS MT (multi-threading) ASE.
	 */
	if (cpu_has_mipsmt)
		mips_cpu_register_ipi_domain(of_node);
}
278 
/* Entry point for non-DT platform code: register without an OF node. */
void __init mips_cpu_irq_init(void)
{
	__mips_cpu_irq_init(NULL);
}
283 
/*
 * DT entry point, bound to "mti,cpu-interrupt-controller" via
 * IRQCHIP_DECLARE below.  @parent is unused: the CPU intc is the root
 * interrupt controller.  Always returns 0.
 */
int __init mips_cpu_irq_of_init(struct device_node *of_node,
				struct device_node *parent)
{
	__mips_cpu_irq_init(of_node);
	return 0;
}
IRQCHIP_DECLARE(cpu_intc, "mti,cpu-interrupt-controller", mips_cpu_irq_of_init);
291