/* drivers/base/regmap/regmap.c — Ubuntu 4.15 (bionic) kernel tree;
 * git-blame viewer chrome removed from this extract. */
1/*
2 * Register map access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
f5d6eba7 13#include <linux/device.h>
b83a313b 14#include <linux/slab.h>
19694b5e 15#include <linux/export.h>
b83a313b
MB
16#include <linux/mutex.h>
17#include <linux/err.h>
d647c199 18#include <linux/of.h>
6863ca62 19#include <linux/rbtree.h>
30b2a553 20#include <linux/sched.h>
2de9d600 21#include <linux/delay.h>
ca747be2 22#include <linux/log2.h>
8698b936 23#include <linux/hwspinlock.h>
b83a313b 24
fb2736bb 25#define CREATE_TRACE_POINTS
f58078da 26#include "trace.h"
fb2736bb 27
93de9124 28#include "internal.h"
b83a313b 29
1044c180
MB
30/*
31 * Sometimes for failures during very early init the trace
32 * infrastructure isn't available early enough to be used. For this
33 * sort of problem defining LOG_DEVICE will add printks for basic
34 * register I/O on a specific device.
35 */
36#undef LOG_DEVICE
37
38static int _regmap_update_bits(struct regmap *map, unsigned int reg,
39 unsigned int mask, unsigned int val,
7ff0589c 40 bool *change, bool force_write);
1044c180 41
3ac17037
BB
42static int _regmap_bus_reg_read(void *context, unsigned int reg,
43 unsigned int *val);
ad278406
AS
44static int _regmap_bus_read(void *context, unsigned int reg,
45 unsigned int *val);
07c320dc
AS
46static int _regmap_bus_formatted_write(void *context, unsigned int reg,
47 unsigned int val);
3ac17037
BB
48static int _regmap_bus_reg_write(void *context, unsigned int reg,
49 unsigned int val);
07c320dc
AS
50static int _regmap_bus_raw_write(void *context, unsigned int reg,
51 unsigned int val);
ad278406 52
76aad392
DC
53bool regmap_reg_in_ranges(unsigned int reg,
54 const struct regmap_range *ranges,
55 unsigned int nranges)
56{
57 const struct regmap_range *r;
58 int i;
59
60 for (i = 0, r = ranges; i < nranges; i++, r++)
61 if (regmap_reg_in_range(reg, r))
62 return true;
63 return false;
64}
65EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
66
154881e5
MB
67bool regmap_check_range_table(struct regmap *map, unsigned int reg,
68 const struct regmap_access_table *table)
76aad392
DC
69{
70 /* Check "no ranges" first */
71 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
72 return false;
73
74 /* In case zero "yes ranges" are supplied, any reg is OK */
75 if (!table->n_yes_ranges)
76 return true;
77
78 return regmap_reg_in_ranges(reg, table->yes_ranges,
79 table->n_yes_ranges);
80}
154881e5 81EXPORT_SYMBOL_GPL(regmap_check_range_table);
76aad392 82
8de2f081
MB
83bool regmap_writeable(struct regmap *map, unsigned int reg)
84{
85 if (map->max_register && reg > map->max_register)
86 return false;
87
88 if (map->writeable_reg)
89 return map->writeable_reg(map->dev, reg);
90
76aad392 91 if (map->wr_table)
154881e5 92 return regmap_check_range_table(map, reg, map->wr_table);
76aad392 93
8de2f081
MB
94 return true;
95}
96
1ea975cf
CB
97bool regmap_cached(struct regmap *map, unsigned int reg)
98{
99 int ret;
100 unsigned int val;
101
5defe9ae 102 if (map->cache_type == REGCACHE_NONE)
1ea975cf
CB
103 return false;
104
105 if (!map->cache_ops)
106 return false;
107
108 if (map->max_register && reg > map->max_register)
109 return false;
110
111 map->lock(map->lock_arg);
112 ret = regcache_read(map, reg, &val);
113 map->unlock(map->lock_arg);
114 if (ret)
115 return false;
116
117 return true;
118}
119
8de2f081
MB
120bool regmap_readable(struct regmap *map, unsigned int reg)
121{
04dc91ce
LPC
122 if (!map->reg_read)
123 return false;
124
8de2f081
MB
125 if (map->max_register && reg > map->max_register)
126 return false;
127
4191f197
WS
128 if (map->format.format_write)
129 return false;
130
8de2f081
MB
131 if (map->readable_reg)
132 return map->readable_reg(map->dev, reg);
133
76aad392 134 if (map->rd_table)
154881e5 135 return regmap_check_range_table(map, reg, map->rd_table);
76aad392 136
8de2f081
MB
137 return true;
138}
139
140bool regmap_volatile(struct regmap *map, unsigned int reg)
141{
5844a8b9 142 if (!map->format.format_write && !regmap_readable(map, reg))
8de2f081
MB
143 return false;
144
145 if (map->volatile_reg)
146 return map->volatile_reg(map->dev, reg);
147
76aad392 148 if (map->volatile_table)
154881e5 149 return regmap_check_range_table(map, reg, map->volatile_table);
76aad392 150
b92be6fe
MB
151 if (map->cache_ops)
152 return false;
153 else
154 return true;
8de2f081
MB
155}
156
157bool regmap_precious(struct regmap *map, unsigned int reg)
158{
4191f197 159 if (!regmap_readable(map, reg))
8de2f081
MB
160 return false;
161
162 if (map->precious_reg)
163 return map->precious_reg(map->dev, reg);
164
76aad392 165 if (map->precious_table)
154881e5 166 return regmap_check_range_table(map, reg, map->precious_table);
76aad392 167
8de2f081
MB
168 return false;
169}
170
82cd9965 171static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
a8f28cfa 172 size_t num)
82cd9965
LPC
173{
174 unsigned int i;
175
176 for (i = 0; i < num; i++)
177 if (!regmap_volatile(map, reg + i))
178 return false;
179
180 return true;
181}
182
9aa50750
WS
183static void regmap_format_2_6_write(struct regmap *map,
184 unsigned int reg, unsigned int val)
185{
186 u8 *out = map->work_buf;
187
188 *out = (reg << 6) | val;
189}
190
b83a313b
MB
191static void regmap_format_4_12_write(struct regmap *map,
192 unsigned int reg, unsigned int val)
193{
194 __be16 *out = map->work_buf;
195 *out = cpu_to_be16((reg << 12) | val);
196}
197
198static void regmap_format_7_9_write(struct regmap *map,
199 unsigned int reg, unsigned int val)
200{
201 __be16 *out = map->work_buf;
202 *out = cpu_to_be16((reg << 9) | val);
203}
204
7e5ec63e
LPC
205static void regmap_format_10_14_write(struct regmap *map,
206 unsigned int reg, unsigned int val)
207{
208 u8 *out = map->work_buf;
209
210 out[2] = val;
211 out[1] = (val >> 8) | (reg << 6);
212 out[0] = reg >> 2;
213}
214
d939fb9a 215static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
b83a313b
MB
216{
217 u8 *b = buf;
218
d939fb9a 219 b[0] = val << shift;
b83a313b
MB
220}
221
141eba2e 222static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
b83a313b
MB
223{
224 __be16 *b = buf;
225
d939fb9a 226 b[0] = cpu_to_be16(val << shift);
b83a313b
MB
227}
228
4aa8c069
XL
229static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
230{
231 __le16 *b = buf;
232
233 b[0] = cpu_to_le16(val << shift);
234}
235
141eba2e
SW
236static void regmap_format_16_native(void *buf, unsigned int val,
237 unsigned int shift)
238{
239 *(u16 *)buf = val << shift;
240}
241
d939fb9a 242static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
ea279fc5
MR
243{
244 u8 *b = buf;
245
d939fb9a
MR
246 val <<= shift;
247
ea279fc5
MR
248 b[0] = val >> 16;
249 b[1] = val >> 8;
250 b[2] = val;
251}
252
141eba2e 253static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
7d5e525b
MB
254{
255 __be32 *b = buf;
256
d939fb9a 257 b[0] = cpu_to_be32(val << shift);
7d5e525b
MB
258}
259
4aa8c069
XL
260static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
261{
262 __le32 *b = buf;
263
264 b[0] = cpu_to_le32(val << shift);
265}
266
141eba2e
SW
267static void regmap_format_32_native(void *buf, unsigned int val,
268 unsigned int shift)
269{
270 *(u32 *)buf = val << shift;
271}
272
afcc00b9
XL
273#ifdef CONFIG_64BIT
274static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
275{
276 __be64 *b = buf;
277
01c377bf 278 b[0] = cpu_to_be64((u64)val << shift);
afcc00b9
XL
279}
280
281static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
282{
283 __le64 *b = buf;
284
01c377bf 285 b[0] = cpu_to_le64((u64)val << shift);
afcc00b9
XL
286}
287
288static void regmap_format_64_native(void *buf, unsigned int val,
289 unsigned int shift)
290{
01c377bf 291 *(u64 *)buf = (u64)val << shift;
afcc00b9
XL
292}
293#endif
294
8a819ff8 295static void regmap_parse_inplace_noop(void *buf)
b83a313b 296{
8a819ff8
MB
297}
298
299static unsigned int regmap_parse_8(const void *buf)
300{
301 const u8 *b = buf;
b83a313b
MB
302
303 return b[0];
304}
305
8a819ff8
MB
306static unsigned int regmap_parse_16_be(const void *buf)
307{
308 const __be16 *b = buf;
309
310 return be16_to_cpu(b[0]);
311}
312
4aa8c069
XL
313static unsigned int regmap_parse_16_le(const void *buf)
314{
315 const __le16 *b = buf;
316
317 return le16_to_cpu(b[0]);
318}
319
8a819ff8 320static void regmap_parse_16_be_inplace(void *buf)
b83a313b
MB
321{
322 __be16 *b = buf;
323
324 b[0] = be16_to_cpu(b[0]);
b83a313b
MB
325}
326
4aa8c069
XL
327static void regmap_parse_16_le_inplace(void *buf)
328{
329 __le16 *b = buf;
330
331 b[0] = le16_to_cpu(b[0]);
332}
333
8a819ff8 334static unsigned int regmap_parse_16_native(const void *buf)
141eba2e
SW
335{
336 return *(u16 *)buf;
337}
338
8a819ff8 339static unsigned int regmap_parse_24(const void *buf)
ea279fc5 340{
8a819ff8 341 const u8 *b = buf;
ea279fc5
MR
342 unsigned int ret = b[2];
343 ret |= ((unsigned int)b[1]) << 8;
344 ret |= ((unsigned int)b[0]) << 16;
345
346 return ret;
347}
348
8a819ff8
MB
349static unsigned int regmap_parse_32_be(const void *buf)
350{
351 const __be32 *b = buf;
352
353 return be32_to_cpu(b[0]);
354}
355
4aa8c069
XL
356static unsigned int regmap_parse_32_le(const void *buf)
357{
358 const __le32 *b = buf;
359
360 return le32_to_cpu(b[0]);
361}
362
8a819ff8 363static void regmap_parse_32_be_inplace(void *buf)
7d5e525b
MB
364{
365 __be32 *b = buf;
366
367 b[0] = be32_to_cpu(b[0]);
7d5e525b
MB
368}
369
4aa8c069
XL
370static void regmap_parse_32_le_inplace(void *buf)
371{
372 __le32 *b = buf;
373
374 b[0] = le32_to_cpu(b[0]);
375}
376
8a819ff8 377static unsigned int regmap_parse_32_native(const void *buf)
141eba2e
SW
378{
379 return *(u32 *)buf;
380}
381
afcc00b9
XL
382#ifdef CONFIG_64BIT
383static unsigned int regmap_parse_64_be(const void *buf)
384{
385 const __be64 *b = buf;
386
387 return be64_to_cpu(b[0]);
388}
389
390static unsigned int regmap_parse_64_le(const void *buf)
391{
392 const __le64 *b = buf;
393
394 return le64_to_cpu(b[0]);
395}
396
397static void regmap_parse_64_be_inplace(void *buf)
398{
399 __be64 *b = buf;
400
401 b[0] = be64_to_cpu(b[0]);
402}
403
404static void regmap_parse_64_le_inplace(void *buf)
405{
406 __le64 *b = buf;
407
408 b[0] = le64_to_cpu(b[0]);
409}
410
411static unsigned int regmap_parse_64_native(const void *buf)
412{
413 return *(u64 *)buf;
414}
415#endif
416
f25637a6 417#ifdef REGMAP_HWSPINLOCK
8698b936
BW
418static void regmap_lock_hwlock(void *__map)
419{
420 struct regmap *map = __map;
421
422 hwspin_lock_timeout(map->hwlock, UINT_MAX);
423}
424
425static void regmap_lock_hwlock_irq(void *__map)
426{
427 struct regmap *map = __map;
428
429 hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
430}
431
432static void regmap_lock_hwlock_irqsave(void *__map)
433{
434 struct regmap *map = __map;
435
436 hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
437 &map->spinlock_flags);
438}
439
440static void regmap_unlock_hwlock(void *__map)
441{
442 struct regmap *map = __map;
443
444 hwspin_unlock(map->hwlock);
445}
446
447static void regmap_unlock_hwlock_irq(void *__map)
448{
449 struct regmap *map = __map;
450
451 hwspin_unlock_irq(map->hwlock);
452}
453
454static void regmap_unlock_hwlock_irqrestore(void *__map)
455{
456 struct regmap *map = __map;
457
458 hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
459}
f25637a6 460#endif
8698b936 461
0d4529c5 462static void regmap_lock_mutex(void *__map)
bacdbe07 463{
0d4529c5 464 struct regmap *map = __map;
bacdbe07
SW
465 mutex_lock(&map->mutex);
466}
467
0d4529c5 468static void regmap_unlock_mutex(void *__map)
bacdbe07 469{
0d4529c5 470 struct regmap *map = __map;
bacdbe07
SW
471 mutex_unlock(&map->mutex);
472}
473
0d4529c5 474static void regmap_lock_spinlock(void *__map)
b4519c71 475__acquires(&map->spinlock)
bacdbe07 476{
0d4529c5 477 struct regmap *map = __map;
92ab1aab
LPC
478 unsigned long flags;
479
480 spin_lock_irqsave(&map->spinlock, flags);
481 map->spinlock_flags = flags;
bacdbe07
SW
482}
483
0d4529c5 484static void regmap_unlock_spinlock(void *__map)
b4519c71 485__releases(&map->spinlock)
bacdbe07 486{
0d4529c5 487 struct regmap *map = __map;
92ab1aab 488 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
bacdbe07
SW
489}
490
72b39f6f
MB
/* devres release callback for the dev_get_regmap() lookup entry. */
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
499
6863ca62
KG
500static bool _regmap_range_add(struct regmap *map,
501 struct regmap_range_node *data)
502{
503 struct rb_root *root = &map->range_tree;
504 struct rb_node **new = &(root->rb_node), *parent = NULL;
505
506 while (*new) {
507 struct regmap_range_node *this =
671a911b 508 rb_entry(*new, struct regmap_range_node, node);
6863ca62
KG
509
510 parent = *new;
511 if (data->range_max < this->range_min)
512 new = &((*new)->rb_left);
513 else if (data->range_min > this->range_max)
514 new = &((*new)->rb_right);
515 else
516 return false;
517 }
518
519 rb_link_node(&data->node, parent, new);
520 rb_insert_color(&data->node, root);
521
522 return true;
523}
524
525static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
526 unsigned int reg)
527{
528 struct rb_node *node = map->range_tree.rb_node;
529
530 while (node) {
531 struct regmap_range_node *this =
671a911b 532 rb_entry(node, struct regmap_range_node, node);
6863ca62
KG
533
534 if (reg < this->range_min)
535 node = node->rb_left;
536 else if (reg > this->range_max)
537 node = node->rb_right;
538 else
539 return this;
540 }
541
542 return NULL;
543}
544
545static void regmap_range_exit(struct regmap *map)
546{
547 struct rb_node *next;
548 struct regmap_range_node *range_node;
549
550 next = rb_first(&map->range_tree);
551 while (next) {
552 range_node = rb_entry(next, struct regmap_range_node, node);
553 next = rb_next(&range_node->node);
554 rb_erase(&range_node->node, &map->range_tree);
555 kfree(range_node);
556 }
557
558 kfree(map->selector_work_buf);
559}
560
6cfec04b
MS
561int regmap_attach_dev(struct device *dev, struct regmap *map,
562 const struct regmap_config *config)
563{
564 struct regmap **m;
565
566 map->dev = dev;
567
568 regmap_debugfs_init(map, config->name);
569
570 /* Add a devres resource for dev_get_regmap() */
571 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
572 if (!m) {
573 regmap_debugfs_exit(map);
574 return -ENOMEM;
575 }
576 *m = map;
577 devres_add(dev, m);
578
579 return 0;
580}
581EXPORT_SYMBOL_GPL(regmap_attach_dev);
582
cf673fbc
GU
583static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
584 const struct regmap_config *config)
585{
586 enum regmap_endian endian;
587
588 /* Retrieve the endianness specification from the regmap config */
589 endian = config->reg_format_endian;
590
591 /* If the regmap config specified a non-default value, use that */
592 if (endian != REGMAP_ENDIAN_DEFAULT)
593 return endian;
594
595 /* Retrieve the endianness specification from the bus config */
596 if (bus && bus->reg_format_endian_default)
597 endian = bus->reg_format_endian_default;
d647c199 598
cf673fbc
GU
599 /* If the bus specified a non-default value, use that */
600 if (endian != REGMAP_ENDIAN_DEFAULT)
601 return endian;
602
603 /* Use this if no other value was found */
604 return REGMAP_ENDIAN_BIG;
605}
606
3c174d29
GR
607enum regmap_endian regmap_get_val_endian(struct device *dev,
608 const struct regmap_bus *bus,
609 const struct regmap_config *config)
d647c199 610{
6e64b6cc 611 struct device_node *np;
cf673fbc 612 enum regmap_endian endian;
d647c199 613
45e1a279 614 /* Retrieve the endianness specification from the regmap config */
cf673fbc 615 endian = config->val_format_endian;
d647c199 616
45e1a279 617 /* If the regmap config specified a non-default value, use that */
cf673fbc
GU
618 if (endian != REGMAP_ENDIAN_DEFAULT)
619 return endian;
d647c199 620
6e64b6cc
PD
621 /* If the dev and dev->of_node exist try to get endianness from DT */
622 if (dev && dev->of_node) {
623 np = dev->of_node;
d647c199 624
6e64b6cc
PD
625 /* Parse the device's DT node for an endianness specification */
626 if (of_property_read_bool(np, "big-endian"))
627 endian = REGMAP_ENDIAN_BIG;
628 else if (of_property_read_bool(np, "little-endian"))
629 endian = REGMAP_ENDIAN_LITTLE;
a06c488d
MB
630 else if (of_property_read_bool(np, "native-endian"))
631 endian = REGMAP_ENDIAN_NATIVE;
6e64b6cc
PD
632
633 /* If the endianness was specified in DT, use that */
634 if (endian != REGMAP_ENDIAN_DEFAULT)
635 return endian;
636 }
45e1a279
SW
637
638 /* Retrieve the endianness specification from the bus config */
cf673fbc
GU
639 if (bus && bus->val_format_endian_default)
640 endian = bus->val_format_endian_default;
d647c199 641
45e1a279 642 /* If the bus specified a non-default value, use that */
cf673fbc
GU
643 if (endian != REGMAP_ENDIAN_DEFAULT)
644 return endian;
45e1a279
SW
645
646 /* Use this if no other value was found */
cf673fbc 647 return REGMAP_ENDIAN_BIG;
d647c199 648}
3c174d29 649EXPORT_SYMBOL_GPL(regmap_get_val_endian);
d647c199 650
3cfe7a74
NB
/*
 * __regmap_init() - instantiate a register map
 *
 * @dev: device the map belongs to (may be NULL)
 * @bus: bus access operations (may be NULL for reg_read/reg_write maps)
 * @bus_context: opaque context passed through to bus callbacks
 * @config: register layout, access rules and caching description
 * @lock_key: lockdep class key for the internal lock
 * @lock_name: lockdep name for the internal lock
 *
 * Returns a valid map pointer or ERR_PTR() on failure.  Callers normally
 * use the regmap_init_*() wrappers which supply @bus.
 */
struct regmap *__regmap_init(struct device *dev,
			     const struct regmap_bus *bus,
			     void *bus_context,
			     const struct regmap_config *config,
			     struct lock_class_key *lock_key,
			     const char *lock_name)
{
	struct regmap *map;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Select the locking implementation: caller-supplied callbacks,
	 * a hardware spinlock, or mutex/spinlock depending on fast_io.
	 */
	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else if (config->hwlock_id) {
/*
 * NOTE(review): mainline gates this on CONFIG_REGMAP_HWSPINLOCK; verify
 * the bare REGMAP_HWSPINLOCK macro used here is defined somewhere,
 * otherwise hwlock_id users always fall into the -EINVAL branch below.
 */
#ifdef REGMAP_HWSPINLOCK
		map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
		if (!map->hwlock) {
			ret = -ENXIO;
			goto err_map;
		}

		switch (config->hwlock_mode) {
		case HWLOCK_IRQSTATE:
			map->lock = regmap_lock_hwlock_irqsave;
			map->unlock = regmap_unlock_hwlock_irqrestore;
			break;
		case HWLOCK_IRQ:
			map->lock = regmap_lock_hwlock_irq;
			map->unlock = regmap_unlock_hwlock_irq;
			break;
		default:
			map->lock = regmap_lock_hwlock;
			map->unlock = regmap_unlock_hwlock;
			break;
		}

		map->lock_arg = map;
#else
		ret = -EINVAL;
		goto err_map;
#endif
	} else {
		if ((bus && bus->fast_io) ||
		    config->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
			lockdep_set_class_and_name(&map->spinlock,
						   lock_key, lock_name);
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
			lockdep_set_class_and_name(&map->mutex,
						   lock_key, lock_name);
		}
		map->lock_arg = map;
	}

	/*
	 * When we write in fast-paths with regmap_bulk_write() don't allocate
	 * scratch buffers with sleeping allocations.
	 */
	if ((bus && bus->fast_io) || config->fast_io)
		map->alloc_flags = GFP_ATOMIC;
	else
		map->alloc_flags = GFP_KERNEL;

	/* Precompute wire-format sizes: register + padding + value. */
	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	/* Power-of-two strides allow shift based address scaling. */
	if (is_power_of_2(map->reg_stride))
		map->reg_stride_order = ilog2(map->reg_stride);
	else
		map->reg_stride_order = -1;
	map->use_single_read = config->use_single_rw || !bus || !bus->read;
	map->use_single_write = config->use_single_rw || !bus || !bus->write;
	map->can_multi_write = config->can_multi_write && bus && bus->write;
	if (bus) {
		map->max_raw_read = bus->max_raw_read;
		map->max_raw_write = bus->max_raw_write;
	}
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->wr_table = config->wr_table;
	map->rd_table = config->rd_table;
	map->volatile_table = config->volatile_table;
	map->precious_table = config->precious_table;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	spin_lock_init(&map->async_lock);
	INIT_LIST_HEAD(&map->async_list);
	INIT_LIST_HEAD(&map->async_free);
	init_waitqueue_head(&map->async_waitq);

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else if (bus) {
		map->read_flag_mask = bus->read_flag_mask;
	}

	/*
	 * Pick the low-level accessors.  Bus-less maps and register-based
	 * buses skip all of the wire formatting below.
	 */
	if (!bus) {
		map->reg_read  = config->reg_read;
		map->reg_write = config->reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else if (!bus->read || !bus->write) {
		map->reg_read = _regmap_bus_reg_read;
		map->reg_write = _regmap_bus_reg_write;

		map->defer_caching = false;
		goto skip_format_initialization;
	} else {
		map->reg_read  = _regmap_bus_read;
		map->reg_update_bits = bus->reg_update_bits;
	}

	reg_endian = regmap_get_reg_endian(bus, config);
	val_endian = regmap_get_val_endian(dev, bus, config);

	/* Choose a register formatter for the combined reg+pad width. */
	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_16_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

	case 24:
		/* 24 bit registers only exist in big endian form. */
		if (reg_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_reg = regmap_format_24;
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_32_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;

#ifdef CONFIG_64BIT
	case 64:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_64_be;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_reg = regmap_format_64_le;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif

	default:
		goto err_hwlock;
	}

	if (val_endian == REGMAP_ENDIAN_NATIVE)
		map->format.parse_inplace = regmap_parse_inplace_noop;

	/* And the matching value formatter/parsers. */
	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		map->format.parse_inplace = regmap_parse_inplace_noop;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			map->format.parse_inplace = regmap_parse_16_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_16_le;
			map->format.parse_val = regmap_parse_16_le;
			map->format.parse_inplace = regmap_parse_16_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_hwlock;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			map->format.parse_inplace = regmap_parse_32_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_32_le;
			map->format.parse_val = regmap_parse_32_le;
			map->format.parse_inplace = regmap_parse_32_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#ifdef CONFIG_64BIT
	case 64:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_64_be;
			map->format.parse_val = regmap_parse_64_be;
			map->format.parse_inplace = regmap_parse_64_be_inplace;
			break;
		case REGMAP_ENDIAN_LITTLE:
			map->format.format_val = regmap_format_64_le;
			map->format.parse_val = regmap_parse_64_le;
			map->format.parse_inplace = regmap_parse_64_le_inplace;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_64_native;
			map->format.parse_val = regmap_parse_64_native;
			break;
		default:
			goto err_hwlock;
		}
		break;
#endif
	}

	/* Combined reg+val writes only exist in big endian form. */
	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_hwlock;
		map->use_single_write = true;
	}

	/* Either a combined writer or both halves must be available. */
	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_hwlock;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_hwlock;
	}

	if (map->format.format_write) {
		map->defer_caching = false;
		map->reg_write = _regmap_bus_formatted_write;
	} else if (map->format.format_val) {
		/* Raw writes can be batched, so cache sync is deferred. */
		map->defer_caching = true;
		map->reg_write = _regmap_bus_raw_write;
	}

skip_format_initialization:

	/* Build the rbtree of indirectly accessed (paged) ranges. */
	map->range_tree = RB_ROOT;
	for (i = 0; i < config->num_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min) {
			dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
				range_cfg->range_max, range_cfg->range_min);
			goto err_range;
		}

		if (range_cfg->range_max > map->max_register) {
			dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
				range_cfg->range_max, map->max_register);
			goto err_range;
		}

		if (range_cfg->selector_reg > map->max_register) {
			dev_err(map->dev,
				"Invalid range %d: selector out of map\n", i);
			goto err_range;
		}

		if (range_cfg->window_len == 0) {
			dev_err(map->dev, "Invalid range %d: window_len 0\n",
				i);
			goto err_range;
		}

		/* Make sure, that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->num_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
					   config->ranges[j].window_len - 1;

			/* Allow data window inside its own virtual range */
			if (j == i)
				continue;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				dev_err(map->dev,
					"Range %d: selector for %d in window\n",
					i, j);
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				dev_err(map->dev,
					"Range %d: window for %d in window\n",
					i, j);
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->map = map;
		new->name = range_cfg->name;
		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (!_regmap_range_add(map, new)) {
			dev_err(map->dev, "Failed to add range %d\n", i);
			kfree(new);
			goto err_range;
		}

		/* One shared scratch buffer serves all range selectors. */
		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret != 0)
		goto err_range;

	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}

	return map;

	/* Unwind in strict reverse order of acquisition. */
err_regcache:
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_hwlock:
	/*
	 * NOTE(review): IS_ENABLED() on a non-CONFIG_ macro is suspicious;
	 * see the REGMAP_HWSPINLOCK note above — confirm against Kconfig.
	 */
	if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
		hwspin_lock_free(map->hwlock);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
b83a313b 1127
c0eb4676
MB
/* devres destructor: tear down the regmap stored in the devres blob. */
static void devm_regmap_release(struct device *dev, void *res)
{
	struct regmap **map = res;

	regmap_exit(*map);
}
1132
3cfe7a74
NB
1133struct regmap *__devm_regmap_init(struct device *dev,
1134 const struct regmap_bus *bus,
1135 void *bus_context,
1136 const struct regmap_config *config,
1137 struct lock_class_key *lock_key,
1138 const char *lock_name)
c0eb4676
MB
1139{
1140 struct regmap **ptr, *regmap;
1141
1142 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1143 if (!ptr)
1144 return ERR_PTR(-ENOMEM);
1145
3cfe7a74
NB
1146 regmap = __regmap_init(dev, bus, bus_context, config,
1147 lock_key, lock_name);
c0eb4676
MB
1148 if (!IS_ERR(regmap)) {
1149 *ptr = regmap;
1150 devres_add(dev, ptr);
1151 } else {
1152 devres_free(ptr);
1153 }
1154
1155 return regmap;
1156}
3cfe7a74 1157EXPORT_SYMBOL_GPL(__devm_regmap_init);
c0eb4676 1158
67252287
SK
1159static void regmap_field_init(struct regmap_field *rm_field,
1160 struct regmap *regmap, struct reg_field reg_field)
1161{
67252287
SK
1162 rm_field->regmap = regmap;
1163 rm_field->reg = reg_field.reg;
1164 rm_field->shift = reg_field.lsb;
921cc294 1165 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
a0102375
KM
1166 rm_field->id_size = reg_field.id_size;
1167 rm_field->id_offset = reg_field.id_offset;
67252287
SK
1168}
1169
1170/**
2cf8e2df 1171 * devm_regmap_field_alloc() - Allocate and initialise a register field.
67252287
SK
1172 *
1173 * @dev: Device that will be interacted with
1174 * @regmap: regmap bank in which this register field is located.
1175 * @reg_field: Register field with in the bank.
1176 *
1177 * The return value will be an ERR_PTR() on error or a valid pointer
1178 * to a struct regmap_field. The regmap_field will be automatically freed
1179 * by the device management code.
1180 */
1181struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1182 struct regmap *regmap, struct reg_field reg_field)
1183{
1184 struct regmap_field *rm_field = devm_kzalloc(dev,
1185 sizeof(*rm_field), GFP_KERNEL);
1186 if (!rm_field)
1187 return ERR_PTR(-ENOMEM);
1188
1189 regmap_field_init(rm_field, regmap, reg_field);
1190
1191 return rm_field;
1192
1193}
1194EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1195
/**
 * devm_regmap_field_free() - Free a register field allocated using
 *                            devm_regmap_field_alloc.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Free register field allocated using devm_regmap_field_alloc(). Usually
 * drivers need not call this function, as the memory allocated via devm
 * will be freed as per device-driver life-cyle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	/* Returns the devm allocation to the device's managed pool */
	devm_kfree(dev, field);
}
1212EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1213
1214/**
2cf8e2df 1215 * regmap_field_alloc() - Allocate and initialise a register field.
67252287
SK
1216 *
1217 * @regmap: regmap bank in which this register field is located.
1218 * @reg_field: Register field with in the bank.
1219 *
1220 * The return value will be an ERR_PTR() on error or a valid pointer
1221 * to a struct regmap_field. The regmap_field should be freed by the
1222 * user once its finished working with it using regmap_field_free().
1223 */
1224struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1225 struct reg_field reg_field)
1226{
1227 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1228
1229 if (!rm_field)
1230 return ERR_PTR(-ENOMEM);
1231
1232 regmap_field_init(rm_field, regmap, reg_field);
1233
1234 return rm_field;
1235}
1236EXPORT_SYMBOL_GPL(regmap_field_alloc);
1237
/**
 * regmap_field_free() - Free register field allocated using
 *                       regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	/* kfree(NULL) is a no-op, so no guard is needed */
	kfree(field);
}
1248EXPORT_SYMBOL_GPL(regmap_field_free);
1249
/**
 * regmap_reinit_cache() - Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration. Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache. This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	/* Drop the old cache and its debugfs view before repopulating */
	regcache_exit(map);
	regmap_debugfs_exit(map);

	/* Adopt the access callbacks and register limit from @config */
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	/* Restart with the cache live and writes reaching the hardware */
	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
752a6a5f 1283EXPORT_SYMBOL_GPL(regmap_reinit_cache);
bf315173 1284
b83a313b 1285/**
2cf8e2df
CK
1286 * regmap_exit() - Free a previously allocated register map
1287 *
1288 * @map: Register map to operate on.
b83a313b
MB
1289 */
1290void regmap_exit(struct regmap *map)
1291{
7e09a979
MB
1292 struct regmap_async *async;
1293
5d1729e7 1294 regcache_exit(map);
31244e39 1295 regmap_debugfs_exit(map);
6863ca62 1296 regmap_range_exit(map);
d2a5884a 1297 if (map->bus && map->bus->free_context)
0135bbcc 1298 map->bus->free_context(map->bus_context);
b83a313b 1299 kfree(map->work_buf);
7e09a979
MB
1300 while (!list_empty(&map->async_free)) {
1301 async = list_first_entry_or_null(&map->async_free,
1302 struct regmap_async,
1303 list);
1304 list_del(&async->list);
1305 kfree(async->work_buf);
1306 kfree(async);
1307 }
e8419c40
MB
1308 if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock)
1309 hwspin_lock_free(map->hwlock);
b83a313b
MB
1310 kfree(map);
1311}
1312EXPORT_SYMBOL_GPL(regmap_exit);
1313
72b39f6f
MB
1314static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1315{
1316 struct regmap **r = res;
1317 if (!r || !*r) {
1318 WARN_ON(!r || !*r);
1319 return 0;
1320 }
1321
1322 /* If the user didn't specify a name match any */
1323 if (data)
1324 return (*r)->name == data;
1325 else
1326 return 1;
1327}
1328
1329/**
2cf8e2df 1330 * dev_get_regmap() - Obtain the regmap (if any) for a device
72b39f6f
MB
1331 *
1332 * @dev: Device to retrieve the map for
1333 * @name: Optional name for the register map, usually NULL.
1334 *
1335 * Returns the regmap for the device if one is present, or NULL. If
1336 * name is specified then it must match the name specified when
1337 * registering the device, if it is NULL then the first regmap found
1338 * will be used. Devices with multiple register maps are very rare,
1339 * generic code should normally not need to specify a name.
1340 */
1341struct regmap *dev_get_regmap(struct device *dev, const char *name)
1342{
1343 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1344 dev_get_regmap_match, (void *)name);
1345
1346 if (!r)
1347 return NULL;
1348 return *r;
1349}
1350EXPORT_SYMBOL_GPL(dev_get_regmap);
1351
8d7d3972 1352/**
2cf8e2df 1353 * regmap_get_device() - Obtain the device from a regmap
8d7d3972
TT
1354 *
1355 * @map: Register map to operate on.
1356 *
1357 * Returns the underlying device that the regmap has been created for.
1358 */
1359struct device *regmap_get_device(struct regmap *map)
1360{
1361 return map->dev;
1362}
fa2fbe4a 1363EXPORT_SYMBOL_GPL(regmap_get_device);
8d7d3972 1364
/*
 * Translate a virtual-range register address into its physical window
 * address, writing the page selector register first if required.  On
 * success *reg is rewritten to the in-window address.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		/* Restore the caller's half-built work buffer */
		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	/* Report the physical address within the data window */
	*reg = range->window_start + win_offset;

	return 0;
}
1412
f50e38c9
TL
1413static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1414 unsigned long mask)
1415{
1416 u8 *buf;
1417 int i;
1418
1419 if (!mask || !map->work_buf)
1420 return;
1421
1422 buf = map->work_buf;
1423
1424 for (i = 0; i < max_bytes; i++)
1425 buf[i] |= (mask >> (8 * i)) & 0xff;
1426}
1427
/*
 * Core raw write path: caches the values, splits writes that cross
 * paged-window boundaries, formats the register into the work buffer and
 * hands the transfer to the bus (async, direct, gather, or a hand
 * linearised copy, in that order of preference).  Caller holds the map
 * lock.
 */
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	/* Value portion of the work buffer, after register + padding */
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					       reg + regmap_get_offset(map, i)))
				return -EINVAL;

	/* Mirror every value into the cache before touching the bus */
	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			/* Recurse for the part that fits this window */
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write. Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		/* Reuse a pooled async descriptor if one is free */
		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		/* Gather form when caller data is external, single buffer
		 * form when the value already lives in async->work_buf */
		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			/* Return the descriptor to the free pool */
			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);
	else
		ret = -ENOTSUPP;

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
		/* regcache_drop_region() takes lock that we already have,
		 * thus call map->cache_ops->drop() directly
		 */
		if (map->cache_ops && map->cache_ops->drop)
			map->cache_ops->drop(map, reg, reg + 1);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
1619
221ad7f2
MB
1620/**
1621 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1622 *
1623 * @map: Map to check.
1624 */
1625bool regmap_can_raw_write(struct regmap *map)
1626{
07ea400e
MP
1627 return map->bus && map->bus->write && map->format.format_val &&
1628 map->format.format_reg;
221ad7f2
MB
1629}
1630EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1631
f50c9eb4
MP
1632/**
1633 * regmap_get_raw_read_max - Get the maximum size we can read
1634 *
1635 * @map: Map to check.
1636 */
1637size_t regmap_get_raw_read_max(struct regmap *map)
1638{
1639 return map->max_raw_read;
1640}
1641EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1642
/**
 * regmap_get_raw_write_max - Get the maximum size we can write
 *
 * @map: Map to check.
 */
size_t regmap_get_raw_write_max(struct regmap *map)
{
	return map->max_raw_write;
}
1652EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1653
07c320dc
AS
/*
 * reg_write implementation for buses whose format layer builds the whole
 * transaction (register + value) into work_buf via format_write.
 */
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	/* Switch pages first if @reg lives inside a virtual range */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}
1681
3ac17037
BB
1682static int _regmap_bus_reg_write(void *context, unsigned int reg,
1683 unsigned int val)
1684{
1685 struct regmap *map = context;
1686
1687 return map->bus->reg_write(map->bus_context, reg, val);
1688}
1689
07c320dc
AS
/*
 * reg_write implementation for buses without format_write: format the
 * value into the work buffer and route it through the raw write path.
 */
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	/* Place the value after the register + padding area of work_buf */
	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}
1705
d2a5884a
AS
1706static inline void *_regmap_map_get_context(struct regmap *map)
1707{
1708 return (map->bus) ? map : map->bus_context;
1709}
1710
4d2dc095
DP
/*
 * Core single-register write: checks writeability, updates the cache
 * (possibly stopping there in cache-only mode), then dispatches to the
 * map's reg_write callback.  Caller holds the map lock.
 */
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		/* Cache-only: mark dirty for later sync, skip the bus */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}
1739
1740/**
2cf8e2df 1741 * regmap_write() - Write a value to a single register
b83a313b
MB
1742 *
1743 * @map: Register map to write to
1744 * @reg: Register to write to
1745 * @val: Value to be written
1746 *
1747 * A value of zero will be returned on success, a negative errno will
1748 * be returned in error cases.
1749 */
1750int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1751{
1752 int ret;
1753
fcac0233 1754 if (!IS_ALIGNED(reg, map->reg_stride))
f01ee60f
SW
1755 return -EINVAL;
1756
0d4529c5 1757 map->lock(map->lock_arg);
b83a313b
MB
1758
1759 ret = _regmap_write(map, reg, val);
1760
0d4529c5 1761 map->unlock(map->lock_arg);
b83a313b
MB
1762
1763 return ret;
1764}
1765EXPORT_SYMBOL_GPL(regmap_write);
1766
915f441b 1767/**
2cf8e2df 1768 * regmap_write_async() - Write a value to a single register asynchronously
915f441b
MB
1769 *
1770 * @map: Register map to write to
1771 * @reg: Register to write to
1772 * @val: Value to be written
1773 *
1774 * A value of zero will be returned on success, a negative errno will
1775 * be returned in error cases.
1776 */
1777int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1778{
1779 int ret;
1780
fcac0233 1781 if (!IS_ALIGNED(reg, map->reg_stride))
915f441b
MB
1782 return -EINVAL;
1783
1784 map->lock(map->lock_arg);
1785
1786 map->async = true;
1787
1788 ret = _regmap_write(map, reg, val);
1789
1790 map->async = false;
1791
1792 map->unlock(map->lock_arg);
1793
1794 return ret;
1795}
1796EXPORT_SYMBOL_GPL(regmap_write_async);
1797
b83a313b 1798/**
2cf8e2df 1799 * regmap_raw_write() - Write raw values to one or more registers
b83a313b
MB
1800 *
1801 * @map: Register map to write to
1802 * @reg: Initial register to write to
1803 * @val: Block of data to be written, laid out for direct transmission to the
1804 * device
1805 * @val_len: Length of data pointed to by val.
1806 *
1807 * This function is intended to be used for things like firmware
1808 * download where a large block of data needs to be transferred to the
1809 * device. No formatting will be done on the data provided.
1810 *
1811 * A value of zero will be returned on success, a negative errno will
1812 * be returned in error cases.
1813 */
1814int regmap_raw_write(struct regmap *map, unsigned int reg,
1815 const void *val, size_t val_len)
1816{
1817 int ret;
1818
221ad7f2 1819 if (!regmap_can_raw_write(map))
d2a5884a 1820 return -EINVAL;
851960ba
SW
1821 if (val_len % map->format.val_bytes)
1822 return -EINVAL;
92357972 1823 if (map->max_raw_write && map->max_raw_write < val_len)
c335931e 1824 return -E2BIG;
851960ba 1825
0d4529c5 1826 map->lock(map->lock_arg);
b83a313b 1827
0a819809 1828 ret = _regmap_raw_write(map, reg, val, val_len);
b83a313b 1829
0d4529c5 1830 map->unlock(map->lock_arg);
b83a313b
MB
1831
1832 return ret;
1833}
1834EXPORT_SYMBOL_GPL(regmap_raw_write);
1835
67252287 1836/**
2cf8e2df
CK
1837 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
1838 * register field.
fdf20029
KM
1839 *
1840 * @field: Register field to write to
1841 * @mask: Bitmask to change
1842 * @val: Value to be written
28972eaa
KM
1843 * @change: Boolean indicating if a write was done
1844 * @async: Boolean indicating asynchronously
1845 * @force: Boolean indicating use force update
fdf20029 1846 *
2cf8e2df
CK
1847 * Perform a read/modify/write cycle on the register field with change,
1848 * async, force option.
1849 *
fdf20029
KM
1850 * A value of zero will be returned on success, a negative errno will
1851 * be returned in error cases.
1852 */
28972eaa
KM
1853int regmap_field_update_bits_base(struct regmap_field *field,
1854 unsigned int mask, unsigned int val,
1855 bool *change, bool async, bool force)
fdf20029
KM
1856{
1857 mask = (mask << field->shift) & field->mask;
1858
28972eaa
KM
1859 return regmap_update_bits_base(field->regmap, field->reg,
1860 mask, val << field->shift,
1861 change, async, force);
e874e6c7 1862}
28972eaa 1863EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
e874e6c7 1864
a0102375 1865/**
2cf8e2df
CK
1866 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
1867 * register field with port ID
a0102375
KM
1868 *
1869 * @field: Register field to write to
1870 * @id: port ID
1871 * @mask: Bitmask to change
1872 * @val: Value to be written
e126edec
KM
1873 * @change: Boolean indicating if a write was done
1874 * @async: Boolean indicating asynchronously
1875 * @force: Boolean indicating use force update
a0102375
KM
1876 *
1877 * A value of zero will be returned on success, a negative errno will
1878 * be returned in error cases.
1879 */
e126edec
KM
1880int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
1881 unsigned int mask, unsigned int val,
1882 bool *change, bool async, bool force)
a0102375
KM
1883{
1884 if (id >= field->id_size)
1885 return -EINVAL;
1886
1887 mask = (mask << field->shift) & field->mask;
1888
e126edec
KM
1889 return regmap_update_bits_base(field->regmap,
1890 field->reg + (field->id_offset * id),
1891 mask, val << field->shift,
1892 change, async, force);
a0102375 1893}
e126edec 1894EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
a0102375 1895
2cf8e2df
CK
/**
 * regmap_bulk_write() - Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be write from
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for
	 * them we have a series of single write operations in the first two if
	 * blocks.
	 *
	 * The first if block is used for memory mapped io. It does not allow
	 * val_bytes of 3 for example.
	 * The second one is for busses that do not provide raw I/O.
	 * The third one is used for busses which do not have these limitations
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		/* Bus-less (e.g. MMIO) map: one _regmap_write per value */
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			/* Read the i-th native-endian value out of @val */
			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->bus && !map->format.parse_inplace) {
		/* Bus with no raw formatting: fall back to regmap_write,
		 * which takes the lock per register itself */
		const u8 *u8 = val;
		const u16 *u16 = val;
		const u32 *u32 = val;
		unsigned int ival;

		for (i = 0; i < val_count; i++) {
			switch (map->format.val_bytes) {
			case 4:
				ival = u32[i];
				break;
			case 2:
				ival = u16[i];
				break;
			case 1:
				ival = u8[i];
				break;
			default:
				return -EINVAL;
			}

			ret = regmap_write(map, reg + (i * map->reg_stride),
					   ival);
			if (ret)
				return ret;
		}
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		/* Raw I/O limited in size: chop into max_raw_write (or
		 * single-register) chunks */
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			/* Round chunk_size down to a whole number of values */
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		/* Unrestricted raw path: byte-swap a copy in place and
		 * issue one raw write for the whole block */
		void *wval;

		if (!val_count)
			return -EINVAL;

		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
2044EXPORT_SYMBOL_GPL(regmap_bulk_write);
2045
e894c3f4
OAO
/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	/* One formatted (reg + pad + val) record per sequence entry */
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	/* Apply the bus write flag to the first byte of the buffer */
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}
2100
2101static unsigned int _regmap_register_page(struct regmap *map,
2102 unsigned int reg,
2103 struct regmap_range_node *range)
2104{
2105 unsigned int win_page = (reg - range->range_min) / range->window_len;
2106
2107 return win_page;
2108}
2109
/*
 * Write a register sequence that may span page windows and/or contain
 * per-entry delays, preserving the caller's write order throughout.
 */
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not necessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			/* First entry establishes the current page */
			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			/* Flush everything accumulated so far in one burst */
			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			base += n;
			n = 0;

			if (page_change) {
				/* Select the new window before continuing */
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}

		}

	}
	/* Write out any trailing registers after the last chop point */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
2186
/*
 * Core implementation for the multi-register write API: validates the
 * sequence, updates the cache, then either falls back to single writes
 * or dispatches a coalesced multi-register bus transfer.
 */
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* Without multi-write support do a sequence of single writes */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate writability and stride alignment for every entry up front */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		/* Mirror the whole sequence into the register cache first */
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
				"Error in caching of register: %x ret: %d\n",
					reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			/* Cache-only mode: defer the hardware write */
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			/* Duplicate: the paged writer mutates the sequence */
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	/* No paging or delays needed: single linearised transfer */
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2261
2cf8e2df
CK
2262/**
2263 * regmap_multi_reg_write() - Write multiple registers to the device
e33fabd3
AO
2264 *
2265 * @map: Register map to write to
2266 * @regs: Array of structures containing register,value to be written
2267 * @num_regs: Number of registers to write
2268 *
2cf8e2df
CK
2269 * Write multiple registers to the device where the set of register, value
2270 * pairs are supplied in any order, possibly not all in a single range.
2271 *
e894c3f4 2272 * The 'normal' block write mode will send ultimately send data on the
2cf8e2df 2273 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
e894c3f4
OAO
2274 * addressed. However, this alternative block multi write mode will send
2275 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2276 * must of course support the mode.
e33fabd3 2277 *
e894c3f4
OAO
2278 * A value of zero will be returned on success, a negative errno will be
2279 * returned in error cases.
e33fabd3 2280 */
8019ff6c 2281int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
f7e2cec0 2282 int num_regs)
e33fabd3 2283{
1d5b40bc 2284 int ret;
e33fabd3
AO
2285
2286 map->lock(map->lock_arg);
2287
1d5b40bc
CK
2288 ret = _regmap_multi_reg_write(map, regs, num_regs);
2289
e33fabd3
AO
2290 map->unlock(map->lock_arg);
2291
2292 return ret;
2293}
2294EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2295
2cf8e2df
CK
2296/**
2297 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2298 * device but not the cache
e33fabd3
AO
2299 *
2300 * @map: Register map to write to
2301 * @regs: Array of structures containing register,value to be written
2302 * @num_regs: Number of registers to write
2303 *
2cf8e2df
CK
2304 * Write multiple registers to the device but not the cache where the set
2305 * of register are supplied in any order.
2306 *
e33fabd3
AO
2307 * This function is intended to be used for writing a large block of data
2308 * atomically to the device in single transfer for those I2C client devices
2309 * that implement this alternative block write mode.
2310 *
2311 * A value of zero will be returned on success, a negative errno will
2312 * be returned in error cases.
2313 */
1d5b40bc 2314int regmap_multi_reg_write_bypassed(struct regmap *map,
8019ff6c 2315 const struct reg_sequence *regs,
1d5b40bc 2316 int num_regs)
e33fabd3 2317{
1d5b40bc
CK
2318 int ret;
2319 bool bypass;
e33fabd3
AO
2320
2321 map->lock(map->lock_arg);
2322
1d5b40bc
CK
2323 bypass = map->cache_bypass;
2324 map->cache_bypass = true;
2325
2326 ret = _regmap_multi_reg_write(map, regs, num_regs);
2327
2328 map->cache_bypass = bypass;
2329
e33fabd3
AO
2330 map->unlock(map->lock_arg);
2331
2332 return ret;
2333}
1d5b40bc 2334EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
e33fabd3 2335
0d509f2b 2336/**
2cf8e2df
CK
2337 * regmap_raw_write_async() - Write raw values to one or more registers
2338 * asynchronously
0d509f2b
MB
2339 *
2340 * @map: Register map to write to
2341 * @reg: Initial register to write to
2342 * @val: Block of data to be written, laid out for direct transmission to the
2343 * device. Must be valid until regmap_async_complete() is called.
2344 * @val_len: Length of data pointed to by val.
2345 *
2346 * This function is intended to be used for things like firmware
2347 * download where a large block of data needs to be transferred to the
2348 * device. No formatting will be done on the data provided.
2349 *
2350 * If supported by the underlying bus the write will be scheduled
2351 * asynchronously, helping maximise I/O speed on higher speed buses
2352 * like SPI. regmap_async_complete() can be called to ensure that all
2353 * asynchrnous writes have been completed.
2354 *
2355 * A value of zero will be returned on success, a negative errno will
2356 * be returned in error cases.
2357 */
2358int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2359 const void *val, size_t val_len)
2360{
2361 int ret;
2362
2363 if (val_len % map->format.val_bytes)
2364 return -EINVAL;
fcac0233 2365 if (!IS_ALIGNED(reg, map->reg_stride))
0d509f2b
MB
2366 return -EINVAL;
2367
2368 map->lock(map->lock_arg);
2369
0a819809
MB
2370 map->async = true;
2371
2372 ret = _regmap_raw_write(map, reg, val, val_len);
2373
2374 map->async = false;
0d509f2b
MB
2375
2376 map->unlock(map->lock_arg);
2377
2378 return ret;
2379}
2380EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2381
/*
 * Perform a raw bus read: format the register address into work_buf,
 * apply the read flag mask and issue a single bus read of val_len bytes.
 * Page selection is handled here if the register lives in a window.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	if (!map->bus || !map->bus->read)
		return -EINVAL;

	range = _regmap_range_lookup(map, reg);
	if (range) {
		/* Switch to the right window before touching the register */
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	/* Format the (possibly page-adjusted) register into the work buffer */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
2414
3ac17037
BB
2415static int _regmap_bus_reg_read(void *context, unsigned int reg,
2416 unsigned int *val)
2417{
2418 struct regmap *map = context;
2419
2420 return map->bus->reg_read(map->bus_context, reg, val);
2421}
2422
ad278406
AS
2423static int _regmap_bus_read(void *context, unsigned int reg,
2424 unsigned int *val)
2425{
2426 int ret;
2427 struct regmap *map = context;
2428
2429 if (!map->format.parse_val)
2430 return -EINVAL;
2431
2432 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
2433 if (ret == 0)
2434 *val = map->format.parse_val(map->work_buf);
2435
2436 return ret;
2437}
2438
/*
 * Read a single register, preferring the cache when available; on a
 * successful hardware read the result is written back into the cache.
 * Must be called with the map lock held.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* Try to satisfy the read from the cache first */
	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	/* Cache-only maps must not touch the hardware */
	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		/* Opt-in debug logging for one named device (see LOG_DEVICE) */
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		/* Populate the cache with the freshly-read value */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2472
2473/**
2cf8e2df 2474 * regmap_read() - Read a value from a single register
b83a313b 2475 *
0093380c 2476 * @map: Register map to read from
b83a313b
MB
2477 * @reg: Register to be read from
2478 * @val: Pointer to store read value
2479 *
2480 * A value of zero will be returned on success, a negative errno will
2481 * be returned in error cases.
2482 */
2483int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2484{
2485 int ret;
2486
fcac0233 2487 if (!IS_ALIGNED(reg, map->reg_stride))
f01ee60f
SW
2488 return -EINVAL;
2489
0d4529c5 2490 map->lock(map->lock_arg);
b83a313b
MB
2491
2492 ret = _regmap_read(map, reg, val);
2493
0d4529c5 2494 map->unlock(map->lock_arg);
b83a313b
MB
2495
2496 return ret;
2497}
2498EXPORT_SYMBOL_GPL(regmap_read);
2499
/**
 * regmap_raw_read() - Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	/* Length must be a whole number of register values */
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		/* Respect any bus limit on raw transfer size */
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			/* Re-encode the cached value into raw wire format */
			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
2564
67252287 2565/**
2cf8e2df 2566 * regmap_field_read() - Read a value to a single register field
67252287
SK
2567 *
2568 * @field: Register field to read from
2569 * @val: Pointer to store read value
2570 *
2571 * A value of zero will be returned on success, a negative errno will
2572 * be returned in error cases.
2573 */
2574int regmap_field_read(struct regmap_field *field, unsigned int *val)
2575{
2576 int ret;
2577 unsigned int reg_val;
2578 ret = regmap_read(field->regmap, field->reg, &reg_val);
2579 if (ret != 0)
2580 return ret;
2581
2582 reg_val &= field->mask;
2583 reg_val >>= field->shift;
2584 *val = reg_val;
2585
2586 return ret;
2587}
2588EXPORT_SYMBOL_GPL(regmap_field_read);
2589
a0102375 2590/**
2cf8e2df 2591 * regmap_fields_read() - Read a value to a single register field with port ID
a0102375
KM
2592 *
2593 * @field: Register field to read from
2594 * @id: port ID
2595 * @val: Pointer to store read value
2596 *
2597 * A value of zero will be returned on success, a negative errno will
2598 * be returned in error cases.
2599 */
2600int regmap_fields_read(struct regmap_field *field, unsigned int id,
2601 unsigned int *val)
2602{
2603 int ret;
2604 unsigned int reg_val;
2605
2606 if (id >= field->id_size)
2607 return -EINVAL;
2608
2609 ret = regmap_read(field->regmap,
2610 field->reg + (field->id_offset * id),
2611 &reg_val);
2612 if (ret != 0)
2613 return ret;
2614
2615 reg_val &= field->mask;
2616 reg_val >>= field->shift;
2617 *val = reg_val;
2618
2619 return ret;
2620}
2621EXPORT_SYMBOL_GPL(regmap_fields_read);
2622
/**
 * regmap_bulk_read() - Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/* Raw block reads are only usable when we can parse values in place
	 * and the data cannot be served from the cache.
	 */
	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices does not support bulk read, for
		 * them we have a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			/* The whole transfer fits in one raw read */
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads, for them we have a series
			 * of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			if (!map->use_single_read) {
				/* Chunk by max_raw_read, rounded down to a
				 * whole number of register values.
				 */
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		/* Convert the raw wire data to native values in place */
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		/* Word-by-word path, typically served from the cache */
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + regmap_get_offset(map, i),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes), ival, 0);
			} else {
				/* Devices providing read and write
				 * operations can use the bulk I/O
				 * functions if they define a val_bytes,
				 * we assume that the values are native
				 * endian.
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
2747
018690d3
MB
/*
 * Read-modify-write the bits in @mask to @val, reporting via @change
 * whether a hardware write actually happened.  When the register is
 * volatile and the bus provides reg_update_bits() that is used instead
 * of a separate read and write.  Must be called with the map lock held.
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		/* Let the bus do the whole update atomically */
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		/* Merge the masked bits of val into the current value */
		tmp = orig & ~mask;
		tmp |= val & mask;

		/* Skip the write when nothing changed, unless forced */
		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}
018690d3
MB
2779
2780/**
2cf8e2df 2781 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
915f441b
MB
2782 *
2783 * @map: Register map to update
2784 * @reg: Register to update
2785 * @mask: Bitmask to change
2786 * @val: New value for bitmask
2787 * @change: Boolean indicating if a write was done
91d31b9f
KM
2788 * @async: Boolean indicating asynchronously
2789 * @force: Boolean indicating use force update
915f441b 2790 *
2cf8e2df
CK
2791 * Perform a read/modify/write cycle on a register map with change, async, force
2792 * options.
2793 *
2794 * If async is true:
2795 *
2796 * With most buses the read must be done synchronously so this is most useful
2797 * for devices with a cache which do not need to interact with the hardware to
2798 * determine the current register value.
915f441b
MB
2799 *
2800 * Returns zero for success, a negative number on error.
2801 */
91d31b9f
KM
2802int regmap_update_bits_base(struct regmap *map, unsigned int reg,
2803 unsigned int mask, unsigned int val,
2804 bool *change, bool async, bool force)
915f441b
MB
2805{
2806 int ret;
2807
2808 map->lock(map->lock_arg);
2809
91d31b9f 2810 map->async = async;
915f441b 2811
91d31b9f 2812 ret = _regmap_update_bits(map, reg, mask, val, change, force);
915f441b
MB
2813
2814 map->async = false;
2815
2816 map->unlock(map->lock_arg);
2817
2818 return ret;
2819}
91d31b9f 2820EXPORT_SYMBOL_GPL(regmap_update_bits_base);
915f441b 2821
0d509f2b
MB
2822void regmap_async_complete_cb(struct regmap_async *async, int ret)
2823{
2824 struct regmap *map = async->map;
2825 bool wake;
2826
c6b570d9 2827 trace_regmap_async_io_complete(map);
fe7d4ccd 2828
0d509f2b 2829 spin_lock(&map->async_lock);
7e09a979 2830 list_move(&async->list, &map->async_free);
0d509f2b
MB
2831 wake = list_empty(&map->async_list);
2832
2833 if (ret != 0)
2834 map->async_ret = ret;
2835
2836 spin_unlock(&map->async_lock);
2837
0d509f2b
MB
2838 if (wake)
2839 wake_up(&map->async_waitq);
2840}
f804fb56 2841EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
0d509f2b
MB
2842
2843static int regmap_async_is_done(struct regmap *map)
2844{
2845 unsigned long flags;
2846 int ret;
2847
2848 spin_lock_irqsave(&map->async_lock, flags);
2849 ret = list_empty(&map->async_list);
2850 spin_unlock_irqrestore(&map->async_lock, flags);
2851
2852 return ret;
2853}
2854
2855/**
2cf8e2df 2856 * regmap_async_complete - Ensure all asynchronous I/O has completed.
0d509f2b
MB
2857 *
2858 * @map: Map to operate on.
2859 *
2860 * Blocks until any pending asynchronous I/O has completed. Returns
2861 * an error code for any failed I/O operations.
2862 */
2863int regmap_async_complete(struct regmap *map)
2864{
2865 unsigned long flags;
2866 int ret;
2867
2868 /* Nothing to do with no async support */
f2e055e7 2869 if (!map->bus || !map->bus->async_write)
0d509f2b
MB
2870 return 0;
2871
c6b570d9 2872 trace_regmap_async_complete_start(map);
fe7d4ccd 2873
0d509f2b
MB
2874 wait_event(map->async_waitq, regmap_async_is_done(map));
2875
2876 spin_lock_irqsave(&map->async_lock, flags);
2877 ret = map->async_ret;
2878 map->async_ret = 0;
2879 spin_unlock_irqrestore(&map->async_lock, flags);
2880
c6b570d9 2881 trace_regmap_async_complete_done(map);
fe7d4ccd 2882
0d509f2b
MB
2883 return ret;
2884}
f88948ef 2885EXPORT_SYMBOL_GPL(regmap_async_complete);
0d509f2b 2886
/**
 * regmap_register_patch - Register and apply register updates to be applied
 *                         on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to be applied to the device defaults on startup, such
 * as the updates some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
	    num_regs))
		return 0;

	/* Append the new entries to any patch registered previously */
	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	/* Apply the patch to the hardware only, not the cache */
	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	/* Wait for any asynchronous writes started above to finish */
	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
2945
2cf8e2df
CK
2946/**
2947 * regmap_get_val_bytes() - Report the size of a register value
2948 *
2949 * @map: Register map to operate on.
a6539c32
MB
2950 *
2951 * Report the size of a register value, mainly intended to for use by
2952 * generic infrastructure built on top of regmap.
2953 */
2954int regmap_get_val_bytes(struct regmap *map)
2955{
2956 if (map->format.format_write)
2957 return -EINVAL;
2958
2959 return map->format.val_bytes;
2960}
2961EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2962
668abc72 2963/**
2cf8e2df
CK
2964 * regmap_get_max_register() - Report the max register value
2965 *
2966 * @map: Register map to operate on.
668abc72
SK
2967 *
2968 * Report the max register value, mainly intended to for use by
2969 * generic infrastructure built on top of regmap.
2970 */
2971int regmap_get_max_register(struct regmap *map)
2972{
2973 return map->max_register ? map->max_register : -EINVAL;
2974}
2975EXPORT_SYMBOL_GPL(regmap_get_max_register);
2976
/**
 * regmap_get_reg_stride() - Report the register address stride
 *
 * @map: Register map to operate on.
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
2990
13ff50c8
NC
2991int regmap_parse_val(struct regmap *map, const void *buf,
2992 unsigned int *val)
2993{
2994 if (!map->format.parse_val)
2995 return -EINVAL;
2996
2997 *val = map->format.parse_val(buf);
2998
2999 return 0;
3000}
3001EXPORT_SYMBOL_GPL(regmap_parse_val);
3002
/* Hook up the regmap debugfs support once core infrastructure is ready. */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);