/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	const char		*name;
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			id;
	int			users;
	size_t			size;
	bool			read_only;
	int			flags;
	struct bin_attribute	eeprom;
	struct device		*base_dev;
	nvmem_reg_read_t	reg_read;
	nvmem_reg_write_t	reg_write;
	void			*priv;
};

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = container_of(kobj, struct device, kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
	&bin_attr_rw_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
	.bin_attrs	= nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
	&nvmem_bin_rw_root_group,
	NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUSR,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
	&bin_attr_ro_root_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
	.bin_attrs	= nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
	&nvmem_bin_ro_root_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
	struct nvmem_cell *p;

	mutex_lock(&nvmem_cells_mutex);

	list_for_each_entry(p, &nvmem_cells, node)
		if (!strcmp(p->name, cell_id)) {
			mutex_unlock(&nvmem_cells_mutex);
			return p;
		}

	mutex_unlock(&nvmem_cells_mutex);

	return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_cells_mutex);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &nvmem_cells) {
		cell = list_entry(p, struct nvmem_cell, node);
		if (cell->nvmem == nvmem)
			nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_add_tail(&cell->node, &nvmem_cells);
	mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
int nvmem_add_cells(struct nvmem_device *nvmem,
		    const struct nvmem_cell_info *info,
		    int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
EXPORT_SYMBOL_GPL(nvmem_add_cells);
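
/*
 * Illustrative provider sketch (not part of this file): describing one
 * 6-byte cell at offset 0x40 and handing it to nvmem_add_cells(). The
 * "mac-address" name and the offset are made up for the example.
 *
 *	static const struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x40,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	err = nvmem_add_cells(nvmem, foo_cells, ARRAY_SIZE(foo_cells));
 */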

/*
 * nvmem_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
			      const struct nvmem_config *config)
{
	int rval;

	if (!config->base_dev)
		return -EINVAL;

	if (nvmem->read_only)
		nvmem->eeprom = bin_attr_ro_root_nvmem;
	else
		nvmem->eeprom = bin_attr_rw_root_nvmem;
	nvmem->eeprom.attr.name = "eeprom";
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only;

	if (config->root_only)
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_root_dev_groups :
			nvmem_rw_root_dev_groups;
	else
		nvmem->dev.groups = nvmem->read_only ?
			nvmem_ro_dev_groups :
			nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells)
		nvmem_add_cells(nvmem, config->cells, config->ncells);

	return nvmem;

err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
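
/*
 * Minimal provider sketch, assuming a driver "foo" with its own raw
 * accessor; foo_reg_read() and foo_raw_read() are hypothetical names:
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		return foo_raw_read(foo, offset, val, bytes);
 *	}
 *
 *	static struct nvmem_config foo_nvmem_config = {
 *		.name		= "foo-efuse",
 *		.read_only	= true,
 *		.word_size	= 1,
 *		.stride		= 4,
 *		.reg_read	= foo_reg_read,
 *	};
 *
 *	foo_nvmem_config.dev = dev;
 *	foo_nvmem_config.size = size;
 *	foo_nvmem_config.priv = foo;
 *	nvmem = nvmem_register(&foo_nvmem_config);
 */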

/**
 * nvmem_unregister() - Unregister a previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
	mutex_lock(&nvmem_mutex);
	if (nvmem->users) {
		mutex_unlock(&nvmem_mutex);
		return -EBUSY;
	}
	mutex_unlock(&nvmem_mutex);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	WARN_ON(nvmem_unregister(*(struct nvmem_device **)res));
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister a previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       struct nvmem_cell **cellp,
					       const char *cell_id)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);

	if (np) {
		nvmem = of_nvmem_find(np);
		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-EPROBE_DEFER);
		}
	} else {
		struct nvmem_cell *cell = nvmem_find_cell(cell_id);

		if (cell) {
			nvmem = cell->nvmem;
			*cellp = cell;
		}

		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-ENOENT);
		}
	}

	nvmem->users++;
	mutex_unlock(&nvmem_mutex);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem->name);

		mutex_lock(&nvmem_mutex);
		nvmem->users--;
		mutex_unlock(&nvmem_mutex);

		return ERR_PTR(-EINVAL);
	}

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	mutex_lock(&nvmem_mutex);
	nvmem->users--;
	mutex_unlock(&nvmem_mutex);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be freed automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
	struct nvmem_cell *cell = NULL;
	struct nvmem_device *nvmem;

	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	return cell;
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 *	  for the cell at index 0 (the lone cell with no accompanying
 *	  nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
				     const char *name)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_cell *cell;
	struct nvmem_device *nvmem;
	const __be32 *addr;
	int rval, len;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (name)
		index = of_property_match_string(np, "nvmem-cell-names", name);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	addr = of_get_property(cell_np, "reg", &len);
	if (!addr || (len < 2 * sizeof(u32))) {
		dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
			cell_np);
		rval = -EINVAL;
		goto err_mem;
	}

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell) {
		rval = -ENOMEM;
		goto err_mem;
	}

	cell->nvmem = nvmem;
	cell->offset = be32_to_cpup(addr++);
	cell->bytes = be32_to_cpup(addr);
	cell->name = cell_np->name;

	addr = of_get_property(cell_np, "bits", &len);
	if (addr && len == (2 * sizeof(u32))) {
		cell->bit_offset = be32_to_cpup(addr++);
		cell->nbits = be32_to_cpup(addr);
	}

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		rval = -EINVAL;
		goto err_sanity;
	}

	nvmem_cell_add(cell);

	return cell;

err_sanity:
	kfree(cell);

err_mem:
	__nvmem_device_put(nvmem);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get an nvmem cell of a device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, cell_id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell_id only allowed for device tree; invalid otherwise */
	if (!cell_id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
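
/*
 * Typical consumer flow, sketched; the "calibration" cell name is an
 * assumption (in the DT case it would come from an nvmem-cell-names entry):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... use data[0..len - 1], then kfree(data) ...
 */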

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get an nvmem cell of a device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release a previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
	nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

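/*
 * Shift a freshly read cell value down by cell->bit_offset bits so the
 * caller sees the value aligned to bit 0, then mask off any leftover bits
 * above cell->nbits in the last byte. Operates on @buf in place; @buf must
 * hold cell->bytes bytes.
 */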
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in fewer bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

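/*
 * Build a scratch copy of @_buf shifted up by cell->bit_offset bits and
 * padded at both ends with the current nvmem contents, so that a
 * cell-granular write only changes the bits belonging to the cell.
 * Returns a kzalloc'd buffer of cell->bytes bytes (freed by the caller)
 * or an ERR_PTR() if reading back the boundary bytes fails.
 */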
static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the cell does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
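
/*
 * One-shot u32 read, sketched; the "speed-grade" cell name is hypothetical:
 *
 *	u32 speed;
 *	int err;
 *
 *	err = nvmem_cell_read_u32(dev, "speed-grade", &speed);
 *	if (err)
 *		return err;
 */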

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write a cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);
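
/*
 * Raw device-level access sketch, assuming a 16-byte read at offset 0 from
 * a device previously obtained via nvmem_device_get():
 *
 *	u8 id[16];
 *	int ret;
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(id), id);
 *	if (ret != sizeof(id))
 *		return ret < 0 ? ret : -EIO;
 */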

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");