/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
#include <asm/unaligned.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write);

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val);
static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val);
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val);
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val);
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val);

bool regmap_reg_in_ranges(unsigned int reg,
			  const struct regmap_range *ranges,
			  unsigned int nranges)
{
	const struct regmap_range *r;
	int i;

	for (i = 0, r = ranges; i < nranges; i++, r++)
		if (regmap_reg_in_range(reg, r))
			return true;
	return false;
}
EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);

bool regmap_check_range_table(struct regmap *map, unsigned int reg,
			      const struct regmap_access_table *table)
{
	/* Check "no ranges" first */
	if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
		return false;

	/* In case zero "yes ranges" are supplied, any reg is OK */
	if (!table->n_yes_ranges)
		return true;

	return regmap_reg_in_ranges(reg, table->yes_ranges,
				    table->n_yes_ranges);
}
EXPORT_SYMBOL_GPL(regmap_check_range_table);
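
/*
 * Example: a minimal sketch of how a driver might describe its readable
 * registers with an access table.  The register addresses below are
 * purely illustrative, not taken from any real device:
 *
 *	static const struct regmap_range foo_rd_ranges[] = {
 *		regmap_reg_range(0x00, 0x0f),
 *		regmap_reg_range(0x20, 0x2f),
 *	};
 *
 *	static const struct regmap_access_table foo_rd_table = {
 *		.yes_ranges = foo_rd_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_rd_ranges),
 *	};
 *
 * With this table installed as config->rd_table,
 * regmap_check_range_table() reports 0x05 and 0x21 as readable but
 * 0x15 as not.
 */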

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	if (map->wr_table)
		return regmap_check_range_table(map, reg, map->wr_table);

	return true;
}

bool regmap_cached(struct regmap *map, unsigned int reg)
{
	int ret;
	unsigned int val;

	if (map->cache_type == REGCACHE_NONE)
		return false;

	if (!map->cache_ops)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	map->lock(map->lock_arg);
	ret = regcache_read(map, reg, &val);
	map->unlock(map->lock_arg);
	if (ret)
		return false;

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (!map->reg_read)
		return false;

	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	if (map->rd_table)
		return regmap_check_range_table(map, reg, map->rd_table);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!map->format.format_write && !regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	if (map->volatile_table)
		return regmap_check_range_table(map, reg, map->volatile_table);

	if (map->cache_ops)
		return false;
	else
		return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	if (map->precious_table)
		return regmap_check_range_table(map, reg, map->precious_table);

	return false;
}

bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
{
	if (map->readable_noinc_reg)
		return map->readable_noinc_reg(map->dev, reg);

	if (map->rd_noinc_table)
		return regmap_check_range_table(map, reg, map->rd_noinc_table);

	return true;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
	size_t num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}
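
/*
 * Worked example for the 10/14 format above: with reg = 0x201
 * (10 0000 0001b) and val = 0x1234 (01 0010 0011 0100b) the three
 * output bytes are
 *
 *	out[0] = reg >> 2                = 0x80
 *	out[1] = (val >> 8) | (reg << 6) = 0x12 | 0x40 = 0x52
 *	out[2] = val & 0xff              = 0x34
 *
 * i.e. the 10 register bits are followed immediately by the 14 value
 * bits in one big-endian 24-bit word.
 */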

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be16(val << shift, buf);
}

static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le16(val << shift, buf);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u16 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be32(val << shift, buf);
}

static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le32(val << shift, buf);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u32 v = val << shift;

	memcpy(buf, &v, sizeof(v));
}

#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_be64((u64) val << shift, buf);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	put_unaligned_le64((u64) val << shift, buf);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	u64 v = (u64) val << shift;

	memcpy(buf, &v, sizeof(v));
}
#endif

static void regmap_parse_inplace_noop(void *buf)
{
}

static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(const void *buf)
{
	return get_unaligned_be16(buf);
}

static unsigned int regmap_parse_16_le(const void *buf)
{
	return get_unaligned_le16(buf);
}

static void regmap_parse_16_be_inplace(void *buf)
{
	u16 v = get_unaligned_be16(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_16_le_inplace(void *buf)
{
	u16 v = get_unaligned_le16(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_16_native(const void *buf)
{
	u16 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(const void *buf)
{
	return get_unaligned_be32(buf);
}

static unsigned int regmap_parse_32_le(const void *buf)
{
	return get_unaligned_le32(buf);
}

static void regmap_parse_32_be_inplace(void *buf)
{
	u32 v = get_unaligned_be32(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_32_le_inplace(void *buf)
{
	u32 v = get_unaligned_le32(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_32_native(const void *buf)
{
	u32 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}

#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
	return get_unaligned_be64(buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return get_unaligned_le64(buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	u64 v = get_unaligned_be64(buf);

	memcpy(buf, &v, sizeof(v));
}

static void regmap_parse_64_le_inplace(void *buf)
{
	u64 v = get_unaligned_le64(buf);

	memcpy(buf, &v, sizeof(v));
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	u64 v;

	memcpy(&v, buf, sizeof(v));
	return v;
}
#endif

static void regmap_lock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
}

static void regmap_lock_hwlock_irqsave(void *__map)
{
	struct regmap *map = __map;

	hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
				    &map->spinlock_flags);
}

static void regmap_unlock_hwlock(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock(map->hwlock);
}

static void regmap_unlock_hwlock_irq(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irq(map->hwlock);
}

static void regmap_unlock_hwlock_irqrestore(void *__map)
{
	struct regmap *map = __map;

	hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
}

static void regmap_lock_unlock_none(void *__map)
{

}

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			rb_entry(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			rb_entry(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

int regmap_attach_dev(struct device *dev, struct regmap *map,
		      const struct regmap_config *config)
{
	struct regmap **m;

	map->dev = dev;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		regmap_debugfs_exit(map);
		return -ENOMEM;
	}
	*m = map;
	devres_add(dev, m);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
					const struct regmap_config *config)
{
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->reg_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->reg_format_endian_default)
		endian = bus->reg_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}

enum regmap_endian regmap_get_val_endian(struct device *dev,
					 const struct regmap_bus *bus,
					 const struct regmap_config *config)
{
	struct device_node *np;
	enum regmap_endian endian;

	/* Retrieve the endianness specification from the regmap config */
	endian = config->val_format_endian;

	/* If the regmap config specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* If the dev and dev->of_node exist try to get endianness from DT */
	if (dev && dev->of_node) {
		np = dev->of_node;

		/* Parse the device's DT node for an endianness specification */
		if (of_property_read_bool(np, "big-endian"))
			endian = REGMAP_ENDIAN_BIG;
		else if (of_property_read_bool(np, "little-endian"))
			endian = REGMAP_ENDIAN_LITTLE;
		else if (of_property_read_bool(np, "native-endian"))
			endian = REGMAP_ENDIAN_NATIVE;

		/* If the endianness was specified in DT, use that */
		if (endian != REGMAP_ENDIAN_DEFAULT)
			return endian;
	}

	/* Retrieve the endianness specification from the bus config */
	if (bus && bus->val_format_endian_default)
		endian = bus->val_format_endian_default;

	/* If the bus specified a non-default value, use that */
	if (endian != REGMAP_ENDIAN_DEFAULT)
		return endian;

	/* Use this if no other value was found */
	return REGMAP_ENDIAN_BIG;
}
EXPORT_SYMBOL_GPL(regmap_get_val_endian);
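
/*
 * Example: the DT lookup above means that a node such as the following
 * (illustrative only) makes regmap format values little-endian
 * regardless of the bus default:
 *
 *	codec@1a {
 *		compatible = "foo,bar-codec";
 *		reg = <0x1a>;
 *		little-endian;
 *	};
 */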

struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->name) {
		map->name = kstrdup_const(config->name, GFP_KERNEL);
		if (!map->name) {
			ret = -ENOMEM;
			goto err_map;
		}
	}

	if (config->disable_locking) {
		map->lock = map->unlock = regmap_lock_unlock_none;
		regmap_debugfs_disable(map);
	} else if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->use_hwlock) {
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_name;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->rd_noinc_table = config->rd_noinc_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask ||
	    config->write_flag_mask ||
	    config->zero_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	if (!bus) {
		map->reg_read  = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read  = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	} else {
		regmap_debugfs_init(map, config->name);
	}

	return map;

err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
err_name:
	kfree_const(map->name);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
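
/*
 * Example: a minimal sketch of setting up a map through one of the
 * __regmap_init() wrappers.  The register sizes and limits below are
 * hypothetical; real drivers normally call a bus-specific helper such
 * as devm_regmap_init_i2c():
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */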

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

struct regmap *__devm_regmap_init(struct device *dev,
				  const struct regmap_bus *bus,
				  void *bus_context,
				  const struct regmap_config *config,
				  struct lock_class_key *lock_key,
				  const char *lock_name)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = __regmap_init(dev, bus, bus_context, config,
			       lock_key, lock_name);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init);

static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}

/**
 * devm_regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @dev: Device that will be interacted with
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field will be automatically freed
 * by the device management code.
 */
struct regmap_field *devm_regmap_field_alloc(struct device *dev,
		struct regmap *regmap, struct reg_field reg_field)
{
	struct regmap_field *rm_field = devm_kzalloc(dev,
					sizeof(*rm_field), GFP_KERNEL);
	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
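
/*
 * Example: a minimal sketch of carving a field out of a register with
 * the REG_FIELD() helper; register 0x10, bits [7:4] are illustrative:
 *
 *	static const struct reg_field foo_gain_field = REG_FIELD(0x10, 4, 7);
 *	struct regmap_field *gain;
 *
 *	gain = devm_regmap_field_alloc(dev, map, foo_gain_field);
 *	if (IS_ERR(gain))
 *		return PTR_ERR(gain);
 */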

/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per the device-driver life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);

/**
 * regmap_field_alloc() - Allocate and initialise a register field.
 *
 * @regmap: regmap bank in which this register field is located.
 * @reg_field: Register field within the bank.
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap_field. The regmap_field should be freed by the
 * user once it's finished working with it, using regmap_field_free().
 */
struct regmap_field *regmap_field_alloc(struct regmap *regmap,
		struct reg_field reg_field)
{
	struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);

	if (!rm_field)
		return ERR_PTR(-ENOMEM);

	regmap_field_init(rm_field, regmap, reg_field);

	return rm_field;
}
EXPORT_SYMBOL_GPL(regmap_field_alloc);

/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);

/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->readable_noinc_reg = config->readable_noinc_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
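
/*
 * Example: a hypothetical probe-time flow for regmap_reinit_cache(),
 * where a device revision read at probe enlarges the register window;
 * FOO_REV_REG and new_config are illustrative only:
 *
 *	regmap_read(map, FOO_REV_REG, &rev);
 *	if (rev >= 2)
 *		new_config.max_register = 0xff;
 *	ret = regmap_reinit_cache(map, &new_config);
 */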

/**
 * regmap_exit() - Free a previously allocated register map
 *
 * @map: Register map to operate on.
 */
void regmap_exit(struct regmap *map)
{
	struct regmap_async *async;

	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus && map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	while (!list_empty(&map->async_free)) {
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		list_del(&async->list);
		kfree(async->work_buf);
		kfree(async);
	}
	if (map->hwlock)
		hwspin_lock_free(map->hwlock);
	kfree_const(map->name);
	kfree(map->patch);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name, match any */
	if (data)
		return !strcmp((*r)->name, data);
	else
		return 1;
}

/**
 * dev_get_regmap() - Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
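
/*
 * Example: a child driver (an MFD cell, say) picking up its parent's
 * regmap; a minimal sketch, no real device implied:
 *
 *	struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *	if (!map)
 *		return -ENODEV;
 */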

/**
 * regmap_get_device() - Obtain the device from a regmap
 *
 * @map: Register map to operate on.
 *
 * Returns the underlying device that the regmap has been created for.
 */
struct device *regmap_get_device(struct regmap *map)
{
	return map->dev;
}
EXPORT_SYMBOL_GPL(regmap_get_device);

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have a selector register inside the data window.
	   In that case, the selector register is located on every page and
	   needs no page switching when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
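
/*
 * Worked example for the paging arithmetic above: with a range starting
 * at range_min = 0x100 and window_len = 0x40 (values illustrative), an
 * access to *reg = 0x1c8 yields
 *
 *	win_offset = (0x1c8 - 0x100) % 0x40 = 0x08
 *	win_page   = (0x1c8 - 0x100) / 0x40 = 3
 *
 * so page 3 is written to the selector register and the access is
 * redirected to window_start + 0x08.
 */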

static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
					  unsigned long mask)
{
	u8 *buf;
	int i;

	if (!mask || !map->work_buf)
		return;

	buf = map->work_buf;

	for (i = 0; i < max_bytes; i++)
		buf[i] |= (mask >> (8 * i)) & 0xff;
}

static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
				  const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					       reg + regmap_get_offset(map, i)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + regmap_get_offset(map, i), ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write_impl(map, reg, val,
						     win_residue *
						     map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes a lock that we already
		 * hold, so call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

/**
 * regmap_can_raw_write - Test if regmap_raw_write() is supported
 *
 * @map: Map to check.
 */
bool regmap_can_raw_write(struct regmap *map)
{
	return map->bus && map->bus->write && map->format.format_val &&
		map->format.format_reg;
}
EXPORT_SYMBOL_GPL(regmap_can_raw_write);

/**
 * regmap_get_raw_read_max - Get the maximum size we can read
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_read_max(struct regmap *map)
{
	return map->max_raw_read;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);

/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);

static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}

static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}

static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write_impl(map, reg,
				      map->work_buf +
				      map->format.reg_bytes +
				      map->format.pad_bytes,
				      map->format.val_bytes);
}

static inline void *_regmap_map_get_context(struct regmap *map)
{
	return (map->bus) ? map : map->bus_context;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}

/**
 * regmap_write() - Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
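
/*
 * Example: a minimal sketch of single-register I/O; FOO_CTRL_REG and
 * the value written are illustrative only:
 *
 *	ret = regmap_write(map, FOO_CTRL_REG, 0x01);
 *	if (ret)
 *		dev_err(dev, "failed to enable device: %d\n", ret);
 */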

/**
 * regmap_write_async() - Write a value to a single register asynchronously
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_write(map, reg, val);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write_async);
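
/*
 * Usage sketch for regmap_write_async() (illustrative, hypothetical
 * register names): queue several writes and then wait for them all,
 * since asynchronous writes are only guaranteed complete once
 * regmap_async_complete() has returned.
 *
 *	static int mychip_fast_config(struct mychip *chip)
 *	{
 *		int ret;
 *
 *		ret = regmap_write_async(chip->regmap, MYCHIP_CFG1, 0x10);
 *		if (ret)
 *			return ret;
 *		ret = regmap_write_async(chip->regmap, MYCHIP_CFG2, 0x22);
 *		if (ret)
 *			return ret;
 *
 *		// Block until both writes have hit the bus.
 *		return regmap_async_complete(chip->regmap);
 *	}
 */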

int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	size_t chunk_count, chunk_bytes;
	size_t chunk_regs = val_count;
	size_t max_data = map->max_raw_write - map->format.reg_bytes -
			map->format.pad_bytes;
	int ret, i;

	if (!val_count)
		return -EINVAL;

	if (map->use_single_write)
		chunk_regs = 1;
	else if (map->max_raw_write && val_len > max_data)
		chunk_regs = max_data / val_bytes;

	chunk_count = val_count / chunk_regs;
	chunk_bytes = chunk_regs * val_bytes;

	/* Write as many whole chunks of chunk_bytes as possible */
	for (i = 0; i < chunk_count; i++) {
		ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes);
		if (ret)
			return ret;

		reg += regmap_get_offset(map, chunk_regs);
		val += chunk_bytes;
		val_len -= chunk_bytes;
	}

	/* Write remaining bytes */
	if (val_len)
		ret = _regmap_raw_write_impl(map, reg, val, val_len);

	return ret;
}

/**
 * regmap_raw_write() - Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (!regmap_can_raw_write(map))
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
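
/*
 * Usage sketch for regmap_raw_write() (illustrative): download a
 * firmware blob that is already laid out in the device's wire format.
 * The firmware name and MYCHIP_DSP_BASE register are assumptions.
 *
 *	static int mychip_load_fw(struct mychip *chip)
 *	{
 *		const struct firmware *fw;
 *		int ret;
 *
 *		ret = request_firmware(&fw, "mychip-dsp.bin", chip->dev);
 *		if (ret)
 *			return ret;
 *
 *		// fw->size must be a multiple of the register value size.
 *		ret = regmap_raw_write(chip->regmap, MYCHIP_DSP_BASE,
 *				       fw->data, fw->size);
 *		release_firmware(fw);
 *		return ret;
 *	}
 */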

/**
 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
 *                                   register field.
 *
 * @field: Register field to write to
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on the register field with the
 * change, async and force options.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_update_bits_base(struct regmap_field *field,
				  unsigned int mask, unsigned int val,
				  bool *change, bool async, bool force)
{
	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap, field->reg,
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
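
/*
 * Usage sketch for register fields (illustrative): describe a field
 * with REG_FIELD(), allocate it once at probe time, and update it via
 * the regmap_field_write() wrapper around this function.  All mychip
 * names are hypothetical.
 *
 *	// Bits [3:1] of a made-up register at address 0x20.
 *	static const struct reg_field mychip_gain_field =
 *		REG_FIELD(0x20, 1, 3);
 *
 *	static int mychip_probe_fields(struct mychip *chip)
 *	{
 *		chip->gain = devm_regmap_field_alloc(chip->dev, chip->regmap,
 *						     mychip_gain_field);
 *		if (IS_ERR(chip->gain))
 *			return PTR_ERR(chip->gain);
 *
 *		// Shifting/masking into bits [3:1] is handled internally.
 *		return regmap_field_write(chip->gain, 5);
 *	}
 */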

/**
 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
 *                                    register field with port ID
 *
 * @field: Register field to write to
 * @id: port ID
 * @mask: Bitmask to change
 * @val: Value to be written
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
				   unsigned int mask, unsigned int val,
				   bool *change, bool async, bool force)
{
	if (id >= field->id_size)
		return -EINVAL;

	mask = (mask << field->shift) & field->mask;

	return regmap_update_bits_base(field->regmap,
				       field->reg + (field->id_offset * id),
				       mask, val << field->shift,
				       change, async, force);
}
EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);

/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to write to
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple
 * transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for them we have a series of
	 * single write operations.
	 */
	if (!map->bus || !map->format.parse_inplace) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval)
			return -ENOMEM;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
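
/*
 * Usage sketch for regmap_bulk_write() (illustrative): write an array
 * of native-endian values to consecutive registers; the core does any
 * per-value endian conversion.  Register address 0x30 is an assumption.
 *
 *	static int mychip_set_coeffs(struct mychip *chip)
 *	{
 *		// Four 16-bit coefficients for a map with val_bits = 16.
 *		static const u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *		return regmap_bulk_write(chip->regmap, 0x30, coeffs,
 *					 ARRAY_SIZE(coeffs));
 *	}
 */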

/*
 * _regmap_raw_multi_reg_write()
 *
 * The (register, newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative.  The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;

		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;

		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}

static unsigned int _regmap_register_page(struct regmap *map,
					  unsigned int reg,
					  struct regmap_range_node *range)
{
	unsigned int win_page = (reg - range->range_min) / range->window_len;

	return win_page;
}

static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * The set of registers is not necessarily in order, but since
	 * the order of the writes must be preserved this algorithm
	 * chops the set each time the page changes.  This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */
		if (page_change || regs[i].delay_us) {
			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n = 0.  This can't
			 * occur with page breaks as we never write on
			 * the first iteration.
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}
		}
	}
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}

static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;

			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;

			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence) * num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);

			if (!base)
				return -ENOMEM;

			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}

/**
 * regmap_multi_reg_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device where the set of register, value
 * pairs are supplied in any order, possibly not all in a single range.
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed.  However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus.  The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
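
/*
 * Usage sketch for regmap_multi_reg_write() (illustrative): a sequence
 * of register/value pairs, optionally with a settle delay after an
 * entry.  The register addresses are made up.
 *
 *	static const struct reg_sequence mychip_init_seq[] = {
 *		{ 0x00, 0x55 },
 *		{ 0x01, 0xaa, 100 },	// wait 100us after this write
 *		{ 0x10, 0x01 },
 *	};
 *
 *	static int mychip_init(struct mychip *chip)
 *	{
 *		return regmap_multi_reg_write(chip->regmap, mychip_init_seq,
 *					      ARRAY_SIZE(mychip_init_seq));
 *	}
 */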

/**
 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
 *                                     device but not the cache
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * Write multiple registers to the device but not the cache where the set
 * of register, value pairs is supplied in any order.
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in a single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);

/**
 * regmap_raw_write_async() - Write raw values to one or more registers
 *                            asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device.  Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI.  regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
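
/*
 * Usage sketch for regmap_raw_write_async() (illustrative): as for
 * regmap_raw_write() but the buffer must stay valid until
 * regmap_async_complete() returns.  The names are hypothetical.
 *
 *	static int mychip_load_fw_async(struct mychip *chip,
 *					const struct firmware *fw)
 *	{
 *		int ret;
 *
 *		ret = regmap_raw_write_async(chip->regmap, MYCHIP_DSP_BASE,
 *					     fw->data, fw->size);
 *		if (ret)
 *			return ret;
 *
 *		// fw must not be released before this returns.
 *		return regmap_async_complete(chip->regmap);
 *	}
 */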

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len, bool noinc)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  noinc ? 1 : val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	return map->bus->reg_read(map->bus_context, reg, val);
}

static int _regmap_bus_read(void *context, unsigned int reg,
			    unsigned int *val)
{
	int ret;
	struct regmap *map = context;
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;

	if (!map->format.parse_val)
		return -EINVAL;

	ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
	if (ret == 0)
		*val = map->format.parse_val(work_val);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}

/**
 * regmap_read() - Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);
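
/*
 * Usage sketch for regmap_read() (illustrative): read a single
 * register into an unsigned int.  MYCHIP_STATUS is an assumed address.
 *
 *	static bool mychip_is_ready(struct mychip *chip)
 *	{
 *		unsigned int status;
 *
 *		if (regmap_read(chip->regmap, MYCHIP_STATUS, &status))
 *			return false;
 *
 *		return status & BIT(0);
 *	}
 */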

/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		size_t chunk_count, chunk_bytes;
		size_t chunk_regs = val_count;

		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}

		if (map->use_single_read)
			chunk_regs = 1;
		else if (map->max_raw_read && val_len > map->max_raw_read)
			chunk_regs = map->max_raw_read / val_bytes;

		chunk_count = val_count / chunk_regs;
		chunk_bytes = chunk_regs * val_bytes;

		/* Read bytes that fit into whole chunks */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
			if (ret != 0)
				goto out;

			reg += regmap_get_offset(map, chunk_regs);
			val += chunk_bytes;
			val_len -= chunk_bytes;
		}

		/* Read remaining bytes */
		if (val_len) {
			ret = _regmap_raw_read(map, reg, val, val_len, false);
			if (ret != 0)
				goto out;
		}
	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_noinc_read() - Read data from a register without incrementing the
 *			 register number
 *
 * @map: Register map to read from
 * @reg: Register to read from
 * @val: Pointer to data buffer
 * @val_len: Length of output buffer in bytes.
 *
 * The regmap API usually assumes that bulk bus read operations will read a
 * range of registers.  Some devices have certain registers for which a read
 * operation will read from an internal FIFO.
 *
 * The target register must be volatile but registers after it can be
 * completely unrelated cacheable registers.
 *
 * This will attempt multiple reads as required to read val_len bytes.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_noinc_read(struct regmap *map, unsigned int reg,
		      void *val, size_t val_len)
{
	size_t read_len;
	int ret;

	if (!map->bus)
		return -EINVAL;
	if (!map->bus->read)
		return -ENOTSUPP;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_len == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	while (val_len) {
		if (map->max_raw_read && map->max_raw_read < val_len)
			read_len = map->max_raw_read;
		else
			read_len = val_len;
		ret = _regmap_raw_read(map, reg, val, read_len, true);
		if (ret)
			goto out_unlock;
		val = ((u8 *)val) + read_len;
		val_len -= read_len;
	}

out_unlock:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_noinc_read);
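
/*
 * Usage sketch for regmap_noinc_read() (illustrative): drain a device
 * FIFO that is exposed through a single register address.  The FIFO
 * register must have been marked volatile and noinc-readable in the
 * regmap configuration; MYCHIP_FIFO is an assumption.
 *
 *	static int mychip_drain_fifo(struct mychip *chip, u8 *buf,
 *				     size_t len)
 *	{
 *		// Every byte is read from the same register address.
 *		return regmap_noinc_read(chip->regmap, MYCHIP_FIFO,
 *					 buf, len);
 *	}
 */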

/**
 * regmap_field_read() - Read a value from a single register field
 *
 * @field: Register field to read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_field_read(struct regmap_field *field, unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	ret = regmap_read(field->regmap, field->reg, &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_field_read);

/**
 * regmap_fields_read() - Read a value from a single register field with port ID
 *
 * @field: Register field to read from
 * @id: port ID
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_fields_read(struct regmap_field *field, unsigned int id,
		       unsigned int *val)
{
	int ret;
	unsigned int reg_val;

	if (id >= field->id_size)
		return -EINVAL;

	ret = regmap_read(field->regmap,
			  field->reg + (field->id_offset * id),
			  &reg_val);
	if (ret != 0)
		return ret;

	reg_val &= field->mask;
	reg_val >>= field->shift;
	*val = reg_val;

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_fields_read);

/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
		if (ret != 0)
			return ret;

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
#ifdef CONFIG_64BIT
		u64 *u64 = val;
#endif
		u32 *u32 = val;
		u16 *u16 = val;
		u8 *u8 = val;

		map->lock(map->lock_arg);

		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &ival);
			if (ret != 0)
				goto out;

			switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
			case 8:
				u64[i] = ival;
				break;
#endif
			case 4:
				u32[i] = ival;
				break;
			case 2:
				u16[i] = ival;
				break;
			case 1:
				u8[i] = ival;
				break;
			default:
				ret = -EINVAL;
				goto out;
			}
		}

out:
		map->unlock(map->lock_arg);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
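
/*
 * Usage sketch for regmap_bulk_read() (illustrative): read consecutive
 * registers into a native-endian array, the mirror image of
 * regmap_bulk_write().  Register address 0x30 is an assumption.
 *
 *	static int mychip_get_coeffs(struct mychip *chip, u16 *coeffs,
 *				     size_t count)
 *	{
 *		// For a map with val_bits = 16; one array slot per register.
 *		return regmap_bulk_read(chip->regmap, 0x30, coeffs, count);
 *	}
 */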

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}

/**
 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 * @async: Boolean indicating whether to write asynchronously
 * @force: Boolean indicating whether to force the write
 *
 * Perform a read/modify/write cycle on a register map with the change,
 * async and force options.
 *
 * If async is true:
 *
 * With most buses the read must be done synchronously so this is most useful
 * for devices with a cache which do not need to interact with the hardware to
 * determine the current register value.
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
			    unsigned int mask, unsigned int val,
			    bool *change, bool async, bool force)
{
	int ret;

	map->lock(map->lock_arg);

	map->async = async;

	ret = _regmap_update_bits(map, reg, mask, val, change, force);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_base);
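
/*
 * Usage sketch for the update_bits family (illustrative): most callers
 * use the regmap_update_bits() wrapper, which is this function with
 * change/async/force all disabled.  MYCHIP_CTRL and the bit layout are
 * assumptions.
 *
 *	static int mychip_set_mute(struct mychip *chip, bool mute)
 *	{
 *		// Read/modify/write only bit 2; other bits are preserved,
 *		// and the write is skipped if the value is unchanged.
 *		return regmap_update_bits(chip->regmap, MYCHIP_CTRL,
 *					  BIT(2), mute ? BIT(2) : 0);
 *	}
 */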

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete() - Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed.  Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);

/**
 * regmap_register_patch() - Register and apply register updates to be applied
 *                           on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately.  Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
	    num_regs))
		return 0;

	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
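
/*
 * Usage sketch for regmap_register_patch() (illustrative): apply a
 * vendor-supplied fixup sequence once at probe; the core re-applies it
 * when the cache is synchronised.  The addresses are made up.
 *
 *	static const struct reg_sequence mychip_errata[] = {
 *		{ 0xf0, 0x13 },		// undocumented vendor magic
 *		{ 0xf1, 0x21 },
 *	};
 *
 *	static int mychip_apply_errata(struct mychip *chip)
 *	{
 *		return regmap_register_patch(chip->regmap, mychip_errata,
 *					     ARRAY_SIZE(mychip_errata));
 *	}
 */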

/**
 * regmap_get_val_bytes() - Report the size of a register value
 *
 * @map: Register map to operate on.
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

/**
 * regmap_get_max_register() - Report the max register value
 *
 * @map: Register map to operate on.
 *
 * Report the max register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_max_register(struct regmap *map)
{
	return map->max_register ? map->max_register : -EINVAL;
}
EXPORT_SYMBOL_GPL(regmap_get_max_register);

/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);

int regmap_parse_val(struct regmap *map, const void *buf,
		     unsigned int *val)
{
	if (!map->format.parse_val)
		return -EINVAL;

	*val = map->format.parse_val(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_parse_val);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);