/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

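/*
 * Runtime state for a regmap based irq_chip: status_buf holds the most
 * recently read status registers, while the mask, wake and type buffers
 * cache the configuration to be written back to the hardware by
 * regmap_irq_sync_unlock().  All are indexed by register, with num_regs
 * (or num_type_reg for the type buffers) entries each.
 */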
struct regmap_irq_chip_data {
	struct mutex lock;
	struct irq_chip irq_chip;

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;
	struct irq_domain *domain;

	int irq;
	int wake_count;

	void *status_reg_buf;
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
	unsigned int *wake_buf;
	unsigned int *type_buf;
	unsigned int *type_buf_def;

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
	return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);
	else
		return regmap_update_bits(d->map, reg, mask, val);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally: if a
		 * masked interrupt is left unacked it will be ignored by
		 * the interrupt handler and may then cause an IRQ storm.
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by writing 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < d->chip->num_type_reg; i++) {
		if (!d->type_buf_def[i])
			continue;
		reg = d->chip->type_base +
			(i * map->reg_stride * d->type_reg_stride);
		if (d->chip->type_invert)
			ret = regmap_irq_update_bits(d, reg,
				d->type_buf_def[i], ~d->type_buf[i]);
		else
			ret = regmap_irq_update_bits(d, reg,
				d->type_buf_def[i], d->type_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync type in %x\n",
				reg);
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg = irq_data->type_reg_offset / map->reg_stride;

	if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
		return 0;

	d->type_buf[reg] &= ~(irq_data->type_falling_mask |
					irq_data->type_rising_mask);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= irq_data->type_falling_mask;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= irq_data->type_rising_mask;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (irq_data->type_falling_mask |
					irq_data->type_rising_mask);
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
		d->wake_count++;
	} else {
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
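
/*
 * The callbacks above follow the usual slow-bus irq_chip pattern: mask,
 * wake and type changes only update the cached buffers while d->lock is
 * held via irq_bus_lock(), and regmap_irq_sync_unlock() writes any
 * changes out to the hardware once the bus lock is released, since
 * regmap I/O may sleep.
 */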

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			pm_runtime_put(map->dev);
			goto exit;
		}
	}

	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	if (chip->num_type_reg) {
		d->type_buf_def = kcalloc(chip->num_type_reg,
					sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		if (!chip->mask_base)
			continue;

		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (chip->num_type_reg) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %x\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
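
/*
 * Example usage (an illustrative sketch only: the foo_* identifiers and
 * FOO_* register defines below are assumptions, not part of this file).
 * A driver for a device with two interrupt sources sharing a single
 * status/mask register pair might do:
 *
 *	static const struct regmap_irq foo_irqs[] = {
 *		{ .reg_offset = 0, .mask = BIT(0) },
 *		{ .reg_offset = 0, .mask = BIT(1) },
 *	};
 *
 *	static const struct regmap_irq_chip foo_irq_chip = {
 *		.name = "foo",
 *		.status_base = FOO_REG_INT_STATUS,
 *		.mask_base = FOO_REG_INT_MASK,
 *		.ack_base = FOO_REG_INT_STATUS,
 *		.num_regs = 1,
 *		.irqs = foo_irqs,
 *		.num_irqs = ARRAY_SIZE(foo_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(foo->regmap, foo->irq, IRQF_TRIGGER_LOW,
 *				  0, &foo_irq_chip, &foo->irq_data);
 */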

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose of all virtual IRQs from the domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Skip this hwirq if there are holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual IRQ for this hwirq on the chip and,
		 * if one is mapped, dispose of it.
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}
	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
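
/*
 * Example usage (an illustrative sketch; the foo_* names are the same
 * assumptions as in the regmap_add_irq_chip() example above).  The devm
 * variant drops the need for a matching cleanup call in the driver's
 * remove path:
 *
 *	ret = devm_regmap_add_irq_chip(&i2c->dev, foo->regmap, foo->irq,
 *				       IRQF_TRIGGER_LOW, 0, &foo_irq_chip,
 *				       &foo->irq_data);
 *	if (ret)
 *		return ret;
 */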

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
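
/*
 * Example usage (an illustrative sketch; FOO_IRQ_ALERT and
 * foo_alert_thread are assumptions): a function driver can translate a
 * chip IRQ index into a virtual IRQ number and then request it as usual:
 *
 *	virq = regmap_irq_get_virq(foo->irq_data, FOO_IRQ_ALERT);
 *	if (virq < 0)
 *		return virq;
 *	ret = devm_request_threaded_irq(dev, virq, NULL, foo_alert_thread,
 *					IRQF_ONESHOT, "foo-alert", foo);
 */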

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);