/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct nvmem_device {
	const char		*name;
	struct regmap		*regmap;
	struct module		*owner;
	struct device		dev;
	int			stride;
	int			word_size;
	int			ncells;
	int			id;
	int			users;
	size_t			size;
	bool			read_only;
};

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr,
				   char *buf, loff_t pos, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int rc;

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = regmap_raw_read(nvmem->regmap, pos, buf, count);

	if (IS_ERR_VALUE(rc))
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr,
				    char *buf, loff_t pos, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);
	int rc;

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return 0;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	rc = regmap_raw_write(nvmem->regmap, pos, buf, count);

	if (IS_ERR_VALUE(rc))
		return rc;

	return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IWUSR | S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
	.write	= bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
	.bin_attrs	= nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
	&nvmem_bin_rw_group,
	NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
	.attr	= {
		.name	= "nvmem",
		.mode	= S_IRUGO,
	},
	.read	= bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
	&bin_attr_ro_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
	.bin_attrs	= nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
	&nvmem_bin_ro_group,
	NULL,
};

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
	return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
	struct device *d;

	if (!nvmem_np)
		return NULL;

	d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
	struct nvmem_cell *p;

	list_for_each_entry(p, &nvmem_cells, node)
		if (p && !strcmp(p->name, cell_id))
			return p;

	return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_cells_mutex);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &nvmem_cells) {
		cell = list_entry(p, struct nvmem_cell, node);
		if (cell->nvmem == nvmem)
			nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_cells_mutex);
	list_add_tail(&cell->node, &nvmem_cells);
	mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_config *cfg)
{
	struct nvmem_cell **cells;
	const struct nvmem_cell_info *info = cfg->cells;
	int i, rval;

	cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < cfg->ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (IS_ERR_VALUE(rval)) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	nvmem->ncells = cfg->ncells;
	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	struct device_node *np;
	struct regmap *rm;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	rm = dev_get_regmap(config->dev, NULL);
	if (!rm) {
		dev_err(config->dev, "Regmap not found\n");
		return ERR_PTR(-EINVAL);
	}

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;
	nvmem->regmap = rm;
	nvmem->owner = config->owner;
	nvmem->stride = regmap_get_reg_stride(rm);
	nvmem->word_size = regmap_get_val_bytes(rm);
	nvmem->size = regmap_get_max_register(rm) + nvmem->stride;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	np = config->dev->of_node;
	nvmem->dev.of_node = np;
	dev_set_name(&nvmem->dev, "%s%d",
		     config->name ? : "nvmem", config->id);

	nvmem->read_only = of_property_read_bool(np, "read-only") |
			   config->read_only;

	nvmem->dev.groups = nvmem->read_only ? nvmem_ro_dev_groups :
					       nvmem_rw_dev_groups;

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval) {
		ida_simple_remove(&nvmem_ida, nvmem->id);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	if (config->cells)
		nvmem_add_cells(nvmem, config);

	return nvmem;
}
EXPORT_SYMBOL_GPL(nvmem_register);

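/*
 * Illustrative provider sketch (an assumption for documentation purposes, not
 * part of this driver): a platform driver that already owns a regmap could
 * register itself as an nvmem provider roughly like this. nvmem_register()
 * looks up the regmap with dev_get_regmap(), so config->dev must already have
 * one. All "foo" names below are hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_config foo_config = {
 *			.dev = &pdev->dev,
 *			.name = "foo-efuse",
 *			.owner = THIS_MODULE,
 *			.read_only = true,
 *		};
 *		struct nvmem_device *nvmem;
 *
 *		nvmem = nvmem_register(&foo_config);
 *		if (IS_ERR(nvmem))
 *			return PTR_ERR(nvmem);
 *
 *		platform_set_drvdata(pdev, nvmem);
 *		return 0;
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		return nvmem_unregister(platform_get_drvdata(pdev));
 *	}
 */
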
/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
	mutex_lock(&nvmem_mutex);
	if (nvmem->users) {
		mutex_unlock(&nvmem_mutex);
		return -EBUSY;
	}
	mutex_unlock(&nvmem_mutex);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
					       struct nvmem_cell **cellp,
					       const char *cell_id)
{
	struct nvmem_device *nvmem = NULL;

	mutex_lock(&nvmem_mutex);

	if (np) {
		nvmem = of_nvmem_find(np);
		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-EPROBE_DEFER);
		}
	} else {
		struct nvmem_cell *cell = nvmem_find_cell(cell_id);

		if (cell) {
			nvmem = cell->nvmem;
			*cellp = cell;
		}

		if (!nvmem) {
			mutex_unlock(&nvmem_mutex);
			return ERR_PTR(-ENOENT);
		}
	}

	nvmem->users++;
	mutex_unlock(&nvmem_mutex);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem->name);

		mutex_lock(&nvmem_mutex);
		nvmem->users--;
		mutex_unlock(&nvmem_mutex);

		return ERR_PTR(-EINVAL);
	}

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	module_put(nvmem->owner);
	mutex_lock(&nvmem_mutex);
	nvmem->users--;
	mutex_unlock(&nvmem_mutex);
}

static int nvmem_match(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}

static struct nvmem_device *nvmem_find(const char *name)
{
	struct device *d;

	d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);

	if (!d)
		return NULL;

	return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index;

	index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put a previously obtained nvmem device
 *
 * @dev: Device that requested the nvmem device.
 * @nvmem: pointer to nvmem device obtained from devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put a previously obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

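/*
 * Illustrative consumer sketch (an assumption, not part of this driver):
 * obtaining a whole nvmem device by name with the devres-managed getter and
 * reading raw bytes from it. The "foo-efuse" name, offset and length are
 * hypothetical.
 *
 *	struct nvmem_device *nvmem;
 *	u8 mac[6];
 *	int ret;
 *
 *	nvmem = devm_nvmem_device_get(dev, "foo-efuse");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x10, sizeof(mac), mac);
 *	if (ret < 0)
 *		return ret;
 */
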
static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
	struct nvmem_cell *cell = NULL;
	struct nvmem_device *nvmem;

	nvmem = __nvmem_device_get(NULL, &cell, cell_id);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	return cell;
}

#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
				     const char *name)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_cell *cell;
	struct nvmem_device *nvmem;
	const __be32 *addr;
	int rval, len, index;

	index = of_property_match_string(np, "nvmem-cell-names", name);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-EINVAL);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	addr = of_get_property(cell_np, "reg", &len);
	if (!addr || (len < 2 * sizeof(u32))) {
		dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n",
			cell_np->full_name);
		rval = -EINVAL;
		goto err_mem;
	}

	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
	if (!cell) {
		rval = -ENOMEM;
		goto err_mem;
	}

	cell->nvmem = nvmem;
	cell->offset = be32_to_cpup(addr++);
	cell->bytes = be32_to_cpup(addr);
	cell->name = cell_np->name;

	addr = of_get_property(cell_np, "bits", &len);
	if (addr && len == (2 * sizeof(u32))) {
		cell->bit_offset = be32_to_cpup(addr++);
		cell->nbits = be32_to_cpup(addr);
	}

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		rval = -EINVAL;
		goto err_sanity;
	}

	nvmem_cell_add(cell);

	return cell;

err_sanity:
	kfree(cell);

err_mem:
	__nvmem_device_put(nvmem);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, cell_id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

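/*
 * Illustrative device-tree consumer sketch (an assumption, not part of this
 * driver): nvmem_cell_get() resolves the cell through the "nvmem-cells" and
 * "nvmem-cell-names" properties when the consumer has an of_node. All node
 * and cell names below are made up.
 *
 *	provider: efuse@700000 {
 *		...
 *		tsens_calib: calib@404 {
 *			reg = <0x404 0x10>;
 *		};
 *	};
 *
 *	consumer {
 *		...
 *		nvmem-cells = <&tsens_calib>;
 *		nvmem-cell-names = "calibration";
 *	};
 *
 * and in the consumer driver:
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 */
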
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requested the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get()
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get()
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
	nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
						    void *buf)
{
	u8 *p, *b;
	int i, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}

		/* result fits in less bytes */
		if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
			*p-- = 0;
	}
	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes);

	if (IS_ERR_VALUE(rc))
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read.
 *
 * Return: ERR_PTR() on error or a valid pointer to a char * buffer on success.
 * The buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem || !nvmem->regmap)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (IS_ERR_VALUE(rc)) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

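/*
 * Illustrative cell-read sketch (an assumption, not part of this driver):
 * nvmem_cell_read() allocates the returned buffer, so the caller must kfree()
 * it once done. The "calibration" cell name is hypothetical.
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... consume len bytes of data ...
 *	kfree(data);
 */
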
static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
						    u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1);
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = regmap_raw_read(nvmem->regmap,
				     cell->offset + cell->bytes - 1, &v, 1);
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || !nvmem->regmap || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (IS_ERR_VALUE(rc))
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

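/*
 * Illustrative cell-write sketch (an assumption, not part of this driver):
 * for byte-aligned cells the buffer length must equal the cell size, as
 * checked in nvmem_cell_write() above; bit-level cells are merged with the
 * surrounding nvmem content by nvmem_cell_prepare_write_buffer(). The values
 * below are hypothetical.
 *
 *	u8 val[2] = { 0xab, 0xcd };
 *	int ret;
 *
 *	ret = nvmem_cell_write(cell, val, sizeof(val));
 *	if (ret < 0)
 *		return ret;
 */
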
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem || !nvmem->regmap)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (IS_ERR_VALUE(rc))
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (IS_ERR_VALUE(rc))
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem || !nvmem->regmap)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (IS_ERR_VALUE(rc))
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem || !nvmem->regmap)
		return -EINVAL;

	rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes);

	if (IS_ERR_VALUE(rc))
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem || !nvmem->regmap)
		return -EINVAL;

	rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes);

	if (IS_ERR_VALUE(rc))
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");