/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	const char		*name;
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	int			users;
	size_t			size;
	bool			read_only;
	int			flags;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	void			*priv;
};

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs	= nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs	= nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
	struct nvmem_cell *p;

	mutex_lock(&nvmem_cells_mutex);

	list_for_each_entry(p, &nvmem_cells, node)
		if (!strcmp(p->name, cell_id)) {
			mutex_unlock(&nvmem_cells_mutex);
			return p;
		}

	mutex_unlock(&nvmem_cells_mutex);

	return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_cells_mutex);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &nvmem_cells) {
		cell = list_entry(p, struct nvmem_cell, node);
		if (cell->nvmem == nvmem)
			nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_add_tail(&cell->node, &nvmem_cells);
	mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
				   const struct nvmem_cell_info *info,
				   struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 on success or a negative error code on failure.
 */
int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
EXPORT_SYMBOL_GPL(nvmem_add_cells);
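
/*
 * Example (illustrative sketch, not part of this file): a provider could
 * describe two fixed cells like this.  The names, offsets and sizes are
 * made up; offsets only need to respect the device's stride, as checked
 * in nvmem_cell_info_to_nvmem_cell() above.
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "mac-address", .offset = 0x40, .bytes = 6 },
 *		{ .name = "calibration", .offset = 0x90, .bytes = 2,
 *		  .bit_offset = 2, .nbits = 10 },
 *	};
 *
 *	rval = nvmem_add_cells(nvmem, foo_cells, ARRAY_SIZE(foo_cells));
 */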

/*
 * nvmem_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only) {
		if (config->root_only)
			nvmem->eeprom = bin_attr_ro_root_nvmem;
		else
			nvmem->eeprom = bin_attr_ro_nvmem;
	} else {
		if (config->root_only)
			nvmem->eeprom = bin_attr_rw_root_nvmem;
		else
			nvmem->eeprom = bin_attr_rw_nvmem;
	}
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	return nvmem;

err_teardown_compat:
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
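
/*
 * Example (illustrative sketch, not part of this file): a minimal
 * read-only provider for a hypothetical "foo" efuse.  All foo_* names,
 * the foo_efuse_init()/foo_hw_read() helpers and the sizes are invented;
 * the config fields and the callback signature match what
 * nvmem_register() consumes above.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo_efuse *foo = priv;
 *
 *		return foo_hw_read(foo, offset, val, bytes);
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_efuse *foo = foo_efuse_init(pdev);
 *		struct nvmem_config config = {
 *			.dev = &pdev->dev,
 *			.name = "foo-efuse",
 *			.id = -1,
 *			.owner = THIS_MODULE,
 *			.stride = 4,
 *			.word_size = 1,
 *			.size = 1024,
 *			.read_only = true,
 *			.reg_read = foo_reg_read,
 *			.priv = foo,
 *		};
 *		struct nvmem_device *nvmem = nvmem_register(&config);
 *
 *		return PTR_ERR_OR_ZERO(nvmem);
 *	}
 *
 * The matching remove path would call nvmem_unregister(nvmem).
 */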

/**
 * nvmem_unregister() - Unregister a previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Zero on success or a negative error code on failure.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
	mutex_lock(&nvmem_mutex);
	if (nvmem->users) {
		mutex_unlock(&nvmem_mutex);
		return -EBUSY;
	}
	mutex_unlock(&nvmem_mutex);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	WARN_ON(nvmem_unregister(*(struct nvmem_device **)res));
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister a previously registered managed
 * nvmem device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Zero on success or a negative error code on failure.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       struct nvmem_cell **cellp,
					       const char *cell_id)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);

	if (np) {
		nvmem = of_nvmem_find(np);
		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-EPROBE_DEFER);
		}
	} else {
		struct nvmem_cell *cell = nvmem_find_cell(cell_id);

		if (cell) {
			nvmem = cell->nvmem;
			*cellp = cell;
		}

		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-ENOENT);
		}
	}

	nvmem->users++;
	mutex_unlock(&nvmem_mutex);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem->name);

		mutex_lock(&nvmem_mutex);
		nvmem->users--;
		mutex_unlock(&nvmem_mutex);

		return ERR_PTR(-EINVAL);
	}

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	mutex_lock(&nvmem_mutex);
	nvmem->users--;
	mutex_unlock(&nvmem_mutex);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return ERR_PTR(-ENOENT);

	return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
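
/*
 * Example device-tree usage (illustrative; node and property values are
 * made up).  The lookup above resolves the "nvmem"/"nvmem-names" pair on
 * the consumer node:
 *
 *	efuse: efuse@1000 {
 *		compatible = "foo,efuse";
 *	};
 *
 *	consumer@2000 {
 *		nvmem = <&efuse>;
 *		nvmem-names = "mac-storage";
 *	};
 *
 * of_nvmem_device_get(np, "mac-storage") on the consumer node then
 * returns the nvmem device registered for &efuse.
 */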
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
	struct nvmem_cell *cell = NULL;
	struct nvmem_device *nvmem;

	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	return cell;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 *	  for the cell at index 0 (the lone cell with no accompanying
 *	  nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell should be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
				     const char *name)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_cell *cell;
	struct nvmem_device *nvmem;
	const __be32 *addr;
	int rval, len;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (name)
		index = of_property_match_string(np, "nvmem-cell-names", name);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	addr = of_get_property(cell_np, "reg", &len);
	if (!addr || (len < 2 * sizeof(u32))) {
		dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
			cell_np);
		rval = -EINVAL;
		goto err_mem;
	}

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell) {
		rval = -ENOMEM;
		goto err_mem;
	}

	cell->nvmem = nvmem;
	cell->offset = be32_to_cpup(addr++);
	cell->bytes = be32_to_cpup(addr);
	cell->name = cell_np->name;

	addr = of_get_property(cell_np, "bits", &len);
	if (addr && len == (2 * sizeof(u32))) {
		cell->bit_offset = be32_to_cpup(addr++);
		cell->nbits = be32_to_cpup(addr);
	}

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		rval = -EINVAL;
		goto err_sanity;
	}

	nvmem_cell_add(cell);

	return cell;

err_sanity:
	kfree(cell);

err_mem:
	__nvmem_device_put(nvmem);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
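
/*
 * Example device-tree layout (illustrative; labels and offsets are made
 * up) matching the parsing above: each cell is a child node of the
 * provider, located by "reg" = <offset bytes> and optionally narrowed by
 * "bits" = <bit_offset nbits>:
 *
 *	efuse: efuse@1000 {
 *		mac_addr: mac-address@40 {
 *			reg = <0x40 6>;
 *		};
 *
 *		calib: calibration@90 {
 *			reg = <0x90 2>;
 *			bits = <2 10>;
 *		};
 *	};
 *
 *	consumer@2000 {
 *		nvmem-cells = <&mac_addr>, <&calib>;
 *		nvmem-cell-names = "mac-address", "calibration";
 *	};
 */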
#endif

/**
 * nvmem_cell_get() - Get an nvmem cell from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell should be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, cell_id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell_id only allowed for device tree; invalid otherwise */
	if (!cell_id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get an nvmem cell from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
				devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release a previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
	nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
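
/*
 * Worked example (illustrative) for the shift above: a cell with
 * bit_offset = 2 and nbits = 10 occupies bytes = DIV_ROUND_UP(12, 8) = 2.
 * A raw read of { 0xb4, 0x07 } becomes { 0xed, 0x01 }: each byte is
 * shifted right by two with the next byte's low bits carried into its
 * msbs, and the final mask GENMASK(1, 0) keeps only the
 * nbits % 8 = 2 valid bits of the last byte.
 */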

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
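
/*
 * Example consumer usage (illustrative; the "mac-address" cell name is
 * made up).  The cell can be put as soon as the buffer has been read,
 * since the buffer is allocated independently:
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	(use len bytes of mac, then kfree(mac))
 */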

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/*
			 * Carry the previous byte's high bits into this
			 * byte's low bits; the shift amount mirrors the
			 * one used in nvmem_shift_read_buffer_in_place().
			 */
			pbits = pbyte >> (BITS_PER_BYTE - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it doesn't end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
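
/*
 * Example (illustrative) of writing a 6-byte cell back: for byte-aligned
 * cells len must equal the cell size, and the value returned on success
 * is the number of bytes written.  eth_random_addr() is just a
 * convenient way to fill the buffer here.
 *
 *	u8 new_mac[6];
 *	int rc;
 *
 *	eth_random_addr(new_mac);
 *	rc = nvmem_cell_write(cell, new_mac, sizeof(new_mac));
 *	if (rc < 0)
 *		return rc;
 */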

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success or a negative error code
 * on failure.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			   struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success or a negative error code
 * on failure.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");