/*
 * Renesas IRQC Driver
 *
 *  Copyright (C) 2013 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#define IRQC_IRQ_MAX	32	/* maximum 32 interrupts per driver instance */

#define IRQC_REQ_STS	0x00	/* Interrupt Request Status Register */
#define IRQC_EN_STS	0x04	/* Interrupt Enable Status Register */
#define IRQC_EN_SET	0x08	/* Interrupt Enable Set Register */
#define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10))
				/* SYS-CPU vs. RT-CPU */
#define DETECT_STATUS	0x100	/* IRQn Detect Status Register */
#define MONITOR		0x104	/* IRQn Signal Level Monitor Register */
#define HLVL_STS	0x108	/* IRQn High Level Detect Status Register */
#define LLVL_STS	0x10c	/* IRQn Low Level Detect Status Register */
#define S_R_EDGE_STS	0x110	/* IRQn Sync Rising Edge Detect Status Reg. */
#define S_F_EDGE_STS	0x114	/* IRQn Sync Falling Edge Detect Status Reg. */
#define A_R_EDGE_STS	0x118	/* IRQn Async Rising Edge Detect Status Reg. */
#define A_F_EDGE_STS	0x11c	/* IRQn Async Falling Edge Detect Status Reg. */
#define CHTEN_STS	0x120	/* Chattering Reduction Status Register */
#define IRQC_CONFIG(n) (0x180 + ((n) * 0x04))
				/* IRQn Configuration Register */

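/*
 * State for one external IRQ pin: its hardware IRQ number within this
 * controller and the parent interrupt that backs it.
 */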
struct irqc_irq {
	int hw_irq;
	int requested_irq;
	struct irqc_priv *p;
};

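/* per-instance driver data: register window, IRQ domain and per-pin state */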
struct irqc_priv {
	void __iomem *iomem;
	void __iomem *cpu_int_base;
	struct irqc_irq irq[IRQC_IRQ_MAX];
	unsigned int number_of_irqs;
	struct platform_device *pdev;
	struct irq_chip_generic *gc;
	struct irq_domain *irq_domain;
	atomic_t wakeup_path;
};

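/* the IRQ domain carries the driver instance as host_data */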
static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

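/* trace helper printing the parent and hardware IRQ numbers of a pin */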
static void irqc_dbg(struct irqc_irq *i, char *str)
{
	dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n",
		str, i->requested_irq, i->hw_irq);
}

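/*
 * Map Linux trigger types to the sense selection value programmed into the
 * low bits of IRQC_CONFIG(n); a zero entry means the type is not supported.
 */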
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW]	= 0x01,
	[IRQ_TYPE_LEVEL_HIGH]	= 0x02,
	[IRQ_TYPE_EDGE_FALLING]	= 0x04,	/* Synchronous */
	[IRQ_TYPE_EDGE_RISING]	= 0x08,	/* Synchronous */
	[IRQ_TYPE_EDGE_BOTH]	= 0x0c,	/* Synchronous */
};

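/* .irq_set_type callback: program the sense selection bits for one pin */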
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);
	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
	u32 tmp;

	irqc_dbg(&p->irq[hw_irq], "sense");

	if (!value)
		return -EINVAL;

	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
	tmp &= ~0x3f;
	tmp |= value;
	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
	return 0;
}

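/*
 * .irq_set_wake callback: forward the wake setting to the parent interrupt
 * and keep count of wakeup users so irqc_suspend() knows the device is part
 * of the wakeup path.
 */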
static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);

	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
	if (on)
		atomic_inc(&p->wakeup_path);
	else
		atomic_dec(&p->wakeup_path);

	return 0;
}

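/*
 * Handler for the parent interrupt backing one pin: acknowledge the event in
 * DETECT_STATUS and hand it to the virtual IRQ mapped in our domain.
 */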
static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
{
	struct irqc_irq *i = dev_id;
	struct irqc_priv *p = i->p;
	u32 bit = BIT(i->hw_irq);

	irqc_dbg(i, "demux1");

	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
		iowrite32(bit, p->iomem + DETECT_STATUS);
		irqc_dbg(i, "demux2");
		generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq));
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

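/*
 * Probe: map the register block, pick up the parent interrupts provided as
 * platform resources, register a linear IRQ domain backed by one generic
 * chip, and finally request the parent interrupts.
 */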
static int irqc_probe(struct platform_device *pdev)
{
	struct irqc_priv *p;
	struct resource *io;
	struct resource *irq;
	const char *name = dev_name(&pdev->dev);
	int ret;
	int k;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		ret = -ENOMEM;
		goto err0;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* get hold of mandatory IOMEM */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io) {
		dev_err(&pdev->dev, "not enough IOMEM resources\n");
		ret = -EINVAL;
		goto err1;
	}

	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
	for (k = 0; k < IRQC_IRQ_MAX; k++) {
		irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
		if (!irq)
			break;

		p->irq[k].p = p;
		p->irq[k].hw_irq = k;
		p->irq[k].requested_irq = irq->start;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(&pdev->dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err1;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	p->iomem = ioremap_nocache(io->start, resource_size(io));
	if (!p->iomem) {
		dev_err(&pdev->dev, "failed to remap IOMEM\n");
		ret = -ENXIO;
		goto err2;
	}

	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0); /* SYS-SPI */

	p->irq_domain = irq_domain_add_linear(pdev->dev.of_node,
					      p->number_of_irqs,
					      &irq_generic_chip_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(&pdev->dev, "cannot initialize irq domain\n");
		goto err2;
	}

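	/*
	 * One generic chip covers all pins: unmask sets the pin bit in
	 * IRQC_EN_SET, mask writes it to IRQC_EN_STS, both relative to the
	 * SYS-CPU register window selected above.
	 */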
	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
					     1, name, handle_level_irq,
					     0, 0, IRQ_GC_INIT_NESTED_LOCK);
	if (ret) {
		dev_err(&pdev->dev, "cannot allocate generic chip\n");
		goto err3;
	}

	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
	p->gc->reg_base = p->cpu_int_base;
	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
	p->gc->chip_types[0].chip.irq_set_type	= irqc_irq_set_type;
	p->gc->chip_types[0].chip.irq_set_wake	= irqc_irq_set_wake;
	p->gc->chip_types[0].chip.flags	= IRQCHIP_MASK_ON_SUSPEND;

	/* request interrupts one by one */
	for (k = 0; k < p->number_of_irqs; k++) {
		if (request_irq(p->irq[k].requested_irq, irqc_irq_handler,
				0, name, &p->irq[k])) {
			dev_err(&pdev->dev, "failed to request IRQ\n");
			ret = -ENOENT;
			goto err4;
		}
	}

	dev_info(&pdev->dev, "driving %d irqs\n", p->number_of_irqs);

	return 0;
err4:
	while (--k >= 0)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

err3:
	irq_domain_remove(p->irq_domain);
err2:
	iounmap(p->iomem);
err1:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
err0:
	return ret;
}

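/* undo probe: release parent interrupts, IRQ domain, mapping and PM refs */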
static int irqc_remove(struct platform_device *pdev)
{
	struct irqc_priv *p = platform_get_drvdata(pdev);
	int k;

	for (k = 0; k < p->number_of_irqs; k++)
		free_irq(p->irq[k].requested_irq, &p->irq[k]);

	irq_domain_remove(p->irq_domain);
	iounmap(p->iomem);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(p);
	return 0;
}

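/* keep the wakeup path flagged while at least one pin is a wakeup source */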
static int __maybe_unused irqc_suspend(struct device *dev)
{
	struct irqc_priv *p = dev_get_drvdata(dev);

	if (atomic_read(&p->wakeup_path))
		device_set_wakeup_path(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);

static const struct of_device_id irqc_dt_ids[] = {
	{ .compatible = "renesas,irqc", },
	{},
};
MODULE_DEVICE_TABLE(of, irqc_dt_ids);

static struct platform_driver irqc_device_driver = {
	.probe		= irqc_probe,
	.remove		= irqc_remove,
	.driver		= {
		.name	= "renesas_irqc",
		.of_match_table	= irqc_dt_ids,
		.pm	= &irqc_pm_ops,
	}
};

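/* register early (postcore) so the irqchip is available before consumers */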
static int __init irqc_init(void)
{
	return platform_driver_register(&irqc_device_driver);
}
postcore_initcall(irqc_init);

static void __exit irqc_exit(void)
{
	platform_driver_unregister(&irqc_device_driver);
}
module_exit(irqc_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas IRQC Driver");
MODULE_LICENSE("GPL v2");