regmap: mmio: Convert to regmap_bus and fix accessor usage
[deliverable/linux.git] / drivers / base / regmap / regmap.c
CommitLineData
b83a313b
MB
1/*
2 * Register map access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
f5d6eba7 13#include <linux/device.h>
b83a313b 14#include <linux/slab.h>
19694b5e 15#include <linux/export.h>
b83a313b
MB
16#include <linux/mutex.h>
17#include <linux/err.h>
d647c199 18#include <linux/of.h>
6863ca62 19#include <linux/rbtree.h>
30b2a553 20#include <linux/sched.h>
2de9d600 21#include <linux/delay.h>
b83a313b 22
fb2736bb 23#define CREATE_TRACE_POINTS
f58078da 24#include "trace.h"
fb2736bb 25
93de9124 26#include "internal.h"
b83a313b 27
1044c180
MB
28/*
29 * Sometimes for failures during very early init the trace
30 * infrastructure isn't available early enough to be used. For this
31 * sort of problem defining LOG_DEVICE will add printks for basic
32 * register I/O on a specific device.
33 */
34#undef LOG_DEVICE
35
36static int _regmap_update_bits(struct regmap *map, unsigned int reg,
37 unsigned int mask, unsigned int val,
7ff0589c 38 bool *change, bool force_write);
1044c180 39
3ac17037
BB
40static int _regmap_bus_reg_read(void *context, unsigned int reg,
41 unsigned int *val);
ad278406
AS
42static int _regmap_bus_read(void *context, unsigned int reg,
43 unsigned int *val);
07c320dc
AS
44static int _regmap_bus_formatted_write(void *context, unsigned int reg,
45 unsigned int val);
3ac17037
BB
46static int _regmap_bus_reg_write(void *context, unsigned int reg,
47 unsigned int val);
07c320dc
AS
48static int _regmap_bus_raw_write(void *context, unsigned int reg,
49 unsigned int val);
ad278406 50
76aad392
DC
51bool regmap_reg_in_ranges(unsigned int reg,
52 const struct regmap_range *ranges,
53 unsigned int nranges)
54{
55 const struct regmap_range *r;
56 int i;
57
58 for (i = 0, r = ranges; i < nranges; i++, r++)
59 if (regmap_reg_in_range(reg, r))
60 return true;
61 return false;
62}
63EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
64
154881e5
MB
65bool regmap_check_range_table(struct regmap *map, unsigned int reg,
66 const struct regmap_access_table *table)
76aad392
DC
67{
68 /* Check "no ranges" first */
69 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
70 return false;
71
72 /* In case zero "yes ranges" are supplied, any reg is OK */
73 if (!table->n_yes_ranges)
74 return true;
75
76 return regmap_reg_in_ranges(reg, table->yes_ranges,
77 table->n_yes_ranges);
78}
154881e5 79EXPORT_SYMBOL_GPL(regmap_check_range_table);
76aad392 80
8de2f081
MB
81bool regmap_writeable(struct regmap *map, unsigned int reg)
82{
83 if (map->max_register && reg > map->max_register)
84 return false;
85
86 if (map->writeable_reg)
87 return map->writeable_reg(map->dev, reg);
88
76aad392 89 if (map->wr_table)
154881e5 90 return regmap_check_range_table(map, reg, map->wr_table);
76aad392 91
8de2f081
MB
92 return true;
93}
94
95bool regmap_readable(struct regmap *map, unsigned int reg)
96{
04dc91ce
LPC
97 if (!map->reg_read)
98 return false;
99
8de2f081
MB
100 if (map->max_register && reg > map->max_register)
101 return false;
102
4191f197
WS
103 if (map->format.format_write)
104 return false;
105
8de2f081
MB
106 if (map->readable_reg)
107 return map->readable_reg(map->dev, reg);
108
76aad392 109 if (map->rd_table)
154881e5 110 return regmap_check_range_table(map, reg, map->rd_table);
76aad392 111
8de2f081
MB
112 return true;
113}
114
115bool regmap_volatile(struct regmap *map, unsigned int reg)
116{
5844a8b9 117 if (!map->format.format_write && !regmap_readable(map, reg))
8de2f081
MB
118 return false;
119
120 if (map->volatile_reg)
121 return map->volatile_reg(map->dev, reg);
122
76aad392 123 if (map->volatile_table)
154881e5 124 return regmap_check_range_table(map, reg, map->volatile_table);
76aad392 125
b92be6fe
MB
126 if (map->cache_ops)
127 return false;
128 else
129 return true;
8de2f081
MB
130}
131
132bool regmap_precious(struct regmap *map, unsigned int reg)
133{
4191f197 134 if (!regmap_readable(map, reg))
8de2f081
MB
135 return false;
136
137 if (map->precious_reg)
138 return map->precious_reg(map->dev, reg);
139
76aad392 140 if (map->precious_table)
154881e5 141 return regmap_check_range_table(map, reg, map->precious_table);
76aad392 142
8de2f081
MB
143 return false;
144}
145
82cd9965 146static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
a8f28cfa 147 size_t num)
82cd9965
LPC
148{
149 unsigned int i;
150
151 for (i = 0; i < num; i++)
152 if (!regmap_volatile(map, reg + i))
153 return false;
154
155 return true;
156}
157
9aa50750
WS
158static void regmap_format_2_6_write(struct regmap *map,
159 unsigned int reg, unsigned int val)
160{
161 u8 *out = map->work_buf;
162
163 *out = (reg << 6) | val;
164}
165
b83a313b
MB
166static void regmap_format_4_12_write(struct regmap *map,
167 unsigned int reg, unsigned int val)
168{
169 __be16 *out = map->work_buf;
170 *out = cpu_to_be16((reg << 12) | val);
171}
172
173static void regmap_format_7_9_write(struct regmap *map,
174 unsigned int reg, unsigned int val)
175{
176 __be16 *out = map->work_buf;
177 *out = cpu_to_be16((reg << 9) | val);
178}
179
7e5ec63e
LPC
180static void regmap_format_10_14_write(struct regmap *map,
181 unsigned int reg, unsigned int val)
182{
183 u8 *out = map->work_buf;
184
185 out[2] = val;
186 out[1] = (val >> 8) | (reg << 6);
187 out[0] = reg >> 2;
188}
189
d939fb9a 190static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
b83a313b
MB
191{
192 u8 *b = buf;
193
d939fb9a 194 b[0] = val << shift;
b83a313b
MB
195}
196
141eba2e 197static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
b83a313b
MB
198{
199 __be16 *b = buf;
200
d939fb9a 201 b[0] = cpu_to_be16(val << shift);
b83a313b
MB
202}
203
4aa8c069
XL
204static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
205{
206 __le16 *b = buf;
207
208 b[0] = cpu_to_le16(val << shift);
209}
210
141eba2e
SW
211static void regmap_format_16_native(void *buf, unsigned int val,
212 unsigned int shift)
213{
214 *(u16 *)buf = val << shift;
215}
216
d939fb9a 217static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
ea279fc5
MR
218{
219 u8 *b = buf;
220
d939fb9a
MR
221 val <<= shift;
222
ea279fc5
MR
223 b[0] = val >> 16;
224 b[1] = val >> 8;
225 b[2] = val;
226}
227
141eba2e 228static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
7d5e525b
MB
229{
230 __be32 *b = buf;
231
d939fb9a 232 b[0] = cpu_to_be32(val << shift);
7d5e525b
MB
233}
234
4aa8c069
XL
235static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
236{
237 __le32 *b = buf;
238
239 b[0] = cpu_to_le32(val << shift);
240}
241
141eba2e
SW
242static void regmap_format_32_native(void *buf, unsigned int val,
243 unsigned int shift)
244{
245 *(u32 *)buf = val << shift;
246}
247
#ifdef CONFIG_64BIT
/* 64-bit formatters: widen @val before shifting so the shift is
 * performed in 64 bits rather than overflowing an unsigned int.
 */
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *out = buf;

	out[0] = cpu_to_be64((u64)val << shift);
}

static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *out = buf;

	out[0] = cpu_to_le64((u64)val << shift);
}

static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif
269
8a819ff8 270static void regmap_parse_inplace_noop(void *buf)
b83a313b 271{
8a819ff8
MB
272}
273
274static unsigned int regmap_parse_8(const void *buf)
275{
276 const u8 *b = buf;
b83a313b
MB
277
278 return b[0];
279}
280
8a819ff8
MB
281static unsigned int regmap_parse_16_be(const void *buf)
282{
283 const __be16 *b = buf;
284
285 return be16_to_cpu(b[0]);
286}
287
4aa8c069
XL
288static unsigned int regmap_parse_16_le(const void *buf)
289{
290 const __le16 *b = buf;
291
292 return le16_to_cpu(b[0]);
293}
294
8a819ff8 295static void regmap_parse_16_be_inplace(void *buf)
b83a313b
MB
296{
297 __be16 *b = buf;
298
299 b[0] = be16_to_cpu(b[0]);
b83a313b
MB
300}
301
4aa8c069
XL
302static void regmap_parse_16_le_inplace(void *buf)
303{
304 __le16 *b = buf;
305
306 b[0] = le16_to_cpu(b[0]);
307}
308
8a819ff8 309static unsigned int regmap_parse_16_native(const void *buf)
141eba2e
SW
310{
311 return *(u16 *)buf;
312}
313
8a819ff8 314static unsigned int regmap_parse_24(const void *buf)
ea279fc5 315{
8a819ff8 316 const u8 *b = buf;
ea279fc5
MR
317 unsigned int ret = b[2];
318 ret |= ((unsigned int)b[1]) << 8;
319 ret |= ((unsigned int)b[0]) << 16;
320
321 return ret;
322}
323
8a819ff8
MB
324static unsigned int regmap_parse_32_be(const void *buf)
325{
326 const __be32 *b = buf;
327
328 return be32_to_cpu(b[0]);
329}
330
4aa8c069
XL
331static unsigned int regmap_parse_32_le(const void *buf)
332{
333 const __le32 *b = buf;
334
335 return le32_to_cpu(b[0]);
336}
337
8a819ff8 338static void regmap_parse_32_be_inplace(void *buf)
7d5e525b
MB
339{
340 __be32 *b = buf;
341
342 b[0] = be32_to_cpu(b[0]);
7d5e525b
MB
343}
344
4aa8c069
XL
345static void regmap_parse_32_le_inplace(void *buf)
346{
347 __le32 *b = buf;
348
349 b[0] = le32_to_cpu(b[0]);
350}
351
8a819ff8 352static unsigned int regmap_parse_32_native(const void *buf)
141eba2e
SW
353{
354 return *(u32 *)buf;
355}
356
#ifdef CONFIG_64BIT
/* NOTE(review): these return unsigned int, so a 64-bit value is
 * narrowed on return — presumably matching the regmap value API,
 * which traffics in unsigned int; confirm against callers.
 */
static unsigned int regmap_parse_64_be(const void *buf)
{
	return be64_to_cpu(*(const __be64 *)buf);
}

static unsigned int regmap_parse_64_le(const void *buf)
{
	return le64_to_cpu(*(const __le64 *)buf);
}

static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *p = buf;

	p[0] = be64_to_cpu(p[0]);
}

static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *p = buf;

	p[0] = le64_to_cpu(p[0]);
}

static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(const u64 *)buf;
}
#endif
391
0d4529c5 392static void regmap_lock_mutex(void *__map)
bacdbe07 393{
0d4529c5 394 struct regmap *map = __map;
bacdbe07
SW
395 mutex_lock(&map->mutex);
396}
397
0d4529c5 398static void regmap_unlock_mutex(void *__map)
bacdbe07 399{
0d4529c5 400 struct regmap *map = __map;
bacdbe07
SW
401 mutex_unlock(&map->mutex);
402}
403
0d4529c5 404static void regmap_lock_spinlock(void *__map)
b4519c71 405__acquires(&map->spinlock)
bacdbe07 406{
0d4529c5 407 struct regmap *map = __map;
92ab1aab
LPC
408 unsigned long flags;
409
410 spin_lock_irqsave(&map->spinlock, flags);
411 map->spinlock_flags = flags;
bacdbe07
SW
412}
413
0d4529c5 414static void regmap_unlock_spinlock(void *__map)
b4519c71 415__releases(&map->spinlock)
bacdbe07 416{
0d4529c5 417 struct regmap *map = __map;
92ab1aab 418 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
bacdbe07
SW
419}
420
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * Intentionally empty: the devres entry exists only so that
	 * dev_get_regmap() can look the regmap up from a struct device;
	 * it owns nothing and there is nothing to release.
	 */
}
429
6863ca62
KG
430static bool _regmap_range_add(struct regmap *map,
431 struct regmap_range_node *data)
432{
433 struct rb_root *root = &map->range_tree;
434 struct rb_node **new = &(root->rb_node), *parent = NULL;
435
436 while (*new) {
437 struct regmap_range_node *this =
438 container_of(*new, struct regmap_range_node, node);
439
440 parent = *new;
441 if (data->range_max < this->range_min)
442 new = &((*new)->rb_left);
443 else if (data->range_min > this->range_max)
444 new = &((*new)->rb_right);
445 else
446 return false;
447 }
448
449 rb_link_node(&data->node, parent, new);
450 rb_insert_color(&data->node, root);
451
452 return true;
453}
454
455static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
456 unsigned int reg)
457{
458 struct rb_node *node = map->range_tree.rb_node;
459
460 while (node) {
461 struct regmap_range_node *this =
462 container_of(node, struct regmap_range_node, node);
463
464 if (reg < this->range_min)
465 node = node->rb_left;
466 else if (reg > this->range_max)
467 node = node->rb_right;
468 else
469 return this;
470 }
471
472 return NULL;
473}
474
475static void regmap_range_exit(struct regmap *map)
476{
477 struct rb_node *next;
478 struct regmap_range_node *range_node;
479
480 next = rb_first(&map->range_tree);
481 while (next) {
482 range_node = rb_entry(next, struct regmap_range_node, node);
483 next = rb_next(&range_node->node);
484 rb_erase(&range_node->node, &map->range_tree);
485 kfree(range_node);
486 }
487
488 kfree(map->selector_work_buf);
489}
490
6cfec04b
MS
491int regmap_attach_dev(struct device *dev, struct regmap *map,
492 const struct regmap_config *config)
493{
494 struct regmap **m;
495
496 map->dev = dev;
497
498 regmap_debugfs_init(map, config->name);
499
500 /* Add a devres resource for dev_get_regmap() */
501 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
502 if (!m) {
503 regmap_debugfs_exit(map);
504 return -ENOMEM;
505 }
506 *m = map;
507 devres_add(dev, m);
508
509 return 0;
510}
511EXPORT_SYMBOL_GPL(regmap_attach_dev);
512
cf673fbc
GU
513static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
514 const struct regmap_config *config)
515{
516 enum regmap_endian endian;
517
518 /* Retrieve the endianness specification from the regmap config */
519 endian = config->reg_format_endian;
520
521 /* If the regmap config specified a non-default value, use that */
522 if (endian != REGMAP_ENDIAN_DEFAULT)
523 return endian;
524
525 /* Retrieve the endianness specification from the bus config */
526 if (bus && bus->reg_format_endian_default)
527 endian = bus->reg_format_endian_default;
d647c199 528
cf673fbc
GU
529 /* If the bus specified a non-default value, use that */
530 if (endian != REGMAP_ENDIAN_DEFAULT)
531 return endian;
532
533 /* Use this if no other value was found */
534 return REGMAP_ENDIAN_BIG;
535}
536
3c174d29
GR
537enum regmap_endian regmap_get_val_endian(struct device *dev,
538 const struct regmap_bus *bus,
539 const struct regmap_config *config)
d647c199 540{
6e64b6cc 541 struct device_node *np;
cf673fbc 542 enum regmap_endian endian;
d647c199 543
45e1a279 544 /* Retrieve the endianness specification from the regmap config */
cf673fbc 545 endian = config->val_format_endian;
d647c199 546
45e1a279 547 /* If the regmap config specified a non-default value, use that */
cf673fbc
GU
548 if (endian != REGMAP_ENDIAN_DEFAULT)
549 return endian;
d647c199 550
6e64b6cc
PD
551 /* If the dev and dev->of_node exist try to get endianness from DT */
552 if (dev && dev->of_node) {
553 np = dev->of_node;
d647c199 554
6e64b6cc
PD
555 /* Parse the device's DT node for an endianness specification */
556 if (of_property_read_bool(np, "big-endian"))
557 endian = REGMAP_ENDIAN_BIG;
558 else if (of_property_read_bool(np, "little-endian"))
559 endian = REGMAP_ENDIAN_LITTLE;
a06c488d
MB
560 else if (of_property_read_bool(np, "native-endian"))
561 endian = REGMAP_ENDIAN_NATIVE;
6e64b6cc
PD
562
563 /* If the endianness was specified in DT, use that */
564 if (endian != REGMAP_ENDIAN_DEFAULT)
565 return endian;
566 }
45e1a279
SW
567
568 /* Retrieve the endianness specification from the bus config */
cf673fbc
GU
569 if (bus && bus->val_format_endian_default)
570 endian = bus->val_format_endian_default;
d647c199 571
45e1a279 572 /* If the bus specified a non-default value, use that */
cf673fbc
GU
573 if (endian != REGMAP_ENDIAN_DEFAULT)
574 return endian;
45e1a279
SW
575
576 /* Use this if no other value was found */
cf673fbc 577 return REGMAP_ENDIAN_BIG;
d647c199 578}
3c174d29 579EXPORT_SYMBOL_GPL(regmap_get_val_endian);
d647c199 580
3cfe7a74
NB
581struct regmap *__regmap_init(struct device *dev,
582 const struct regmap_bus *bus,
583 void *bus_context,
584 const struct regmap_config *config,
585 struct lock_class_key *lock_key,
586 const char *lock_name)
b83a313b 587{
6cfec04b 588 struct regmap *map;
b83a313b 589 int ret = -EINVAL;
141eba2e 590 enum regmap_endian reg_endian, val_endian;
6863ca62 591 int i, j;
b83a313b 592
d2a5884a 593 if (!config)
abbb18fb 594 goto err;
b83a313b
MB
595
596 map = kzalloc(sizeof(*map), GFP_KERNEL);
597 if (map == NULL) {
598 ret = -ENOMEM;
599 goto err;
600 }
601
0d4529c5
DC
602 if (config->lock && config->unlock) {
603 map->lock = config->lock;
604 map->unlock = config->unlock;
605 map->lock_arg = config->lock_arg;
bacdbe07 606 } else {
d2a5884a
AS
607 if ((bus && bus->fast_io) ||
608 config->fast_io) {
0d4529c5
DC
609 spin_lock_init(&map->spinlock);
610 map->lock = regmap_lock_spinlock;
611 map->unlock = regmap_unlock_spinlock;
3cfe7a74
NB
612 lockdep_set_class_and_name(&map->spinlock,
613 lock_key, lock_name);
0d4529c5
DC
614 } else {
615 mutex_init(&map->mutex);
616 map->lock = regmap_lock_mutex;
617 map->unlock = regmap_unlock_mutex;
3cfe7a74
NB
618 lockdep_set_class_and_name(&map->mutex,
619 lock_key, lock_name);
0d4529c5
DC
620 }
621 map->lock_arg = map;
bacdbe07 622 }
b4a21fc2
SB
623
624 /*
625 * When we write in fast-paths with regmap_bulk_write() don't allocate
626 * scratch buffers with sleeping allocations.
627 */
628 if ((bus && bus->fast_io) || config->fast_io)
629 map->alloc_flags = GFP_ATOMIC;
630 else
631 map->alloc_flags = GFP_KERNEL;
632
c212accc 633 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
82159ba8 634 map->format.pad_bytes = config->pad_bits / 8;
c212accc 635 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
5494a98f
FE
636 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
637 config->val_bits + config->pad_bits, 8);
d939fb9a 638 map->reg_shift = config->pad_bits % 8;
f01ee60f
SW
639 if (config->reg_stride)
640 map->reg_stride = config->reg_stride;
641 else
642 map->reg_stride = 1;
67921a1a
MP
643 map->use_single_read = config->use_single_rw || !bus || !bus->read;
644 map->use_single_write = config->use_single_rw || !bus || !bus->write;
9c9f7f67 645 map->can_multi_write = config->can_multi_write && bus && bus->write;
17649c90
SS
646 if (bus) {
647 map->max_raw_read = bus->max_raw_read;
648 map->max_raw_write = bus->max_raw_write;
649 }
b83a313b
MB
650 map->dev = dev;
651 map->bus = bus;
0135bbcc 652 map->bus_context = bus_context;
2e2ae66d 653 map->max_register = config->max_register;
76aad392
DC
654 map->wr_table = config->wr_table;
655 map->rd_table = config->rd_table;
656 map->volatile_table = config->volatile_table;
657 map->precious_table = config->precious_table;
2e2ae66d
MB
658 map->writeable_reg = config->writeable_reg;
659 map->readable_reg = config->readable_reg;
660 map->volatile_reg = config->volatile_reg;
2efe1642 661 map->precious_reg = config->precious_reg;
5d1729e7 662 map->cache_type = config->cache_type;
72b39f6f 663 map->name = config->name;
b83a313b 664
0d509f2b
MB
665 spin_lock_init(&map->async_lock);
666 INIT_LIST_HEAD(&map->async_list);
7e09a979 667 INIT_LIST_HEAD(&map->async_free);
0d509f2b
MB
668 init_waitqueue_head(&map->async_waitq);
669
6f306441
LPC
670 if (config->read_flag_mask || config->write_flag_mask) {
671 map->read_flag_mask = config->read_flag_mask;
672 map->write_flag_mask = config->write_flag_mask;
d2a5884a 673 } else if (bus) {
6f306441
LPC
674 map->read_flag_mask = bus->read_flag_mask;
675 }
676
d2a5884a
AS
677 if (!bus) {
678 map->reg_read = config->reg_read;
679 map->reg_write = config->reg_write;
680
3ac17037
BB
681 map->defer_caching = false;
682 goto skip_format_initialization;
683 } else if (!bus->read || !bus->write) {
684 map->reg_read = _regmap_bus_reg_read;
685 map->reg_write = _regmap_bus_reg_write;
686
d2a5884a
AS
687 map->defer_caching = false;
688 goto skip_format_initialization;
689 } else {
690 map->reg_read = _regmap_bus_read;
77792b11 691 map->reg_update_bits = bus->reg_update_bits;
d2a5884a 692 }
ad278406 693
cf673fbc
GU
694 reg_endian = regmap_get_reg_endian(bus, config);
695 val_endian = regmap_get_val_endian(dev, bus, config);
141eba2e 696
d939fb9a 697 switch (config->reg_bits + map->reg_shift) {
9aa50750
WS
698 case 2:
699 switch (config->val_bits) {
700 case 6:
701 map->format.format_write = regmap_format_2_6_write;
702 break;
703 default:
704 goto err_map;
705 }
706 break;
707
b83a313b
MB
708 case 4:
709 switch (config->val_bits) {
710 case 12:
711 map->format.format_write = regmap_format_4_12_write;
712 break;
713 default:
714 goto err_map;
715 }
716 break;
717
718 case 7:
719 switch (config->val_bits) {
720 case 9:
721 map->format.format_write = regmap_format_7_9_write;
722 break;
723 default:
724 goto err_map;
725 }
726 break;
727
7e5ec63e
LPC
728 case 10:
729 switch (config->val_bits) {
730 case 14:
731 map->format.format_write = regmap_format_10_14_write;
732 break;
733 default:
734 goto err_map;
735 }
736 break;
737
b83a313b
MB
738 case 8:
739 map->format.format_reg = regmap_format_8;
740 break;
741
742 case 16:
141eba2e
SW
743 switch (reg_endian) {
744 case REGMAP_ENDIAN_BIG:
745 map->format.format_reg = regmap_format_16_be;
746 break;
747 case REGMAP_ENDIAN_NATIVE:
748 map->format.format_reg = regmap_format_16_native;
749 break;
750 default:
751 goto err_map;
752 }
b83a313b
MB
753 break;
754
237019e7
LPC
755 case 24:
756 if (reg_endian != REGMAP_ENDIAN_BIG)
757 goto err_map;
758 map->format.format_reg = regmap_format_24;
759 break;
760
7d5e525b 761 case 32:
141eba2e
SW
762 switch (reg_endian) {
763 case REGMAP_ENDIAN_BIG:
764 map->format.format_reg = regmap_format_32_be;
765 break;
766 case REGMAP_ENDIAN_NATIVE:
767 map->format.format_reg = regmap_format_32_native;
768 break;
769 default:
770 goto err_map;
771 }
7d5e525b
MB
772 break;
773
afcc00b9
XL
774#ifdef CONFIG_64BIT
775 case 64:
776 switch (reg_endian) {
777 case REGMAP_ENDIAN_BIG:
778 map->format.format_reg = regmap_format_64_be;
779 break;
780 case REGMAP_ENDIAN_NATIVE:
781 map->format.format_reg = regmap_format_64_native;
782 break;
783 default:
784 goto err_map;
785 }
786 break;
787#endif
788
b83a313b
MB
789 default:
790 goto err_map;
791 }
792
8a819ff8
MB
793 if (val_endian == REGMAP_ENDIAN_NATIVE)
794 map->format.parse_inplace = regmap_parse_inplace_noop;
795
b83a313b
MB
796 switch (config->val_bits) {
797 case 8:
798 map->format.format_val = regmap_format_8;
799 map->format.parse_val = regmap_parse_8;
8a819ff8 800 map->format.parse_inplace = regmap_parse_inplace_noop;
b83a313b
MB
801 break;
802 case 16:
141eba2e
SW
803 switch (val_endian) {
804 case REGMAP_ENDIAN_BIG:
805 map->format.format_val = regmap_format_16_be;
806 map->format.parse_val = regmap_parse_16_be;
8a819ff8 807 map->format.parse_inplace = regmap_parse_16_be_inplace;
141eba2e 808 break;
4aa8c069
XL
809 case REGMAP_ENDIAN_LITTLE:
810 map->format.format_val = regmap_format_16_le;
811 map->format.parse_val = regmap_parse_16_le;
812 map->format.parse_inplace = regmap_parse_16_le_inplace;
813 break;
141eba2e
SW
814 case REGMAP_ENDIAN_NATIVE:
815 map->format.format_val = regmap_format_16_native;
816 map->format.parse_val = regmap_parse_16_native;
817 break;
818 default:
819 goto err_map;
820 }
b83a313b 821 break;
ea279fc5 822 case 24:
141eba2e
SW
823 if (val_endian != REGMAP_ENDIAN_BIG)
824 goto err_map;
ea279fc5
MR
825 map->format.format_val = regmap_format_24;
826 map->format.parse_val = regmap_parse_24;
827 break;
7d5e525b 828 case 32:
141eba2e
SW
829 switch (val_endian) {
830 case REGMAP_ENDIAN_BIG:
831 map->format.format_val = regmap_format_32_be;
832 map->format.parse_val = regmap_parse_32_be;
8a819ff8 833 map->format.parse_inplace = regmap_parse_32_be_inplace;
141eba2e 834 break;
4aa8c069
XL
835 case REGMAP_ENDIAN_LITTLE:
836 map->format.format_val = regmap_format_32_le;
837 map->format.parse_val = regmap_parse_32_le;
838 map->format.parse_inplace = regmap_parse_32_le_inplace;
839 break;
141eba2e
SW
840 case REGMAP_ENDIAN_NATIVE:
841 map->format.format_val = regmap_format_32_native;
842 map->format.parse_val = regmap_parse_32_native;
843 break;
844 default:
845 goto err_map;
846 }
7d5e525b 847 break;
afcc00b9 848#ifdef CONFIG_64BIT
782035ea 849 case 64:
afcc00b9
XL
850 switch (val_endian) {
851 case REGMAP_ENDIAN_BIG:
852 map->format.format_val = regmap_format_64_be;
853 map->format.parse_val = regmap_parse_64_be;
854 map->format.parse_inplace = regmap_parse_64_be_inplace;
855 break;
856 case REGMAP_ENDIAN_LITTLE:
857 map->format.format_val = regmap_format_64_le;
858 map->format.parse_val = regmap_parse_64_le;
859 map->format.parse_inplace = regmap_parse_64_le_inplace;
860 break;
861 case REGMAP_ENDIAN_NATIVE:
862 map->format.format_val = regmap_format_64_native;
863 map->format.parse_val = regmap_parse_64_native;
864 break;
865 default:
866 goto err_map;
867 }
868 break;
869#endif
b83a313b
MB
870 }
871
141eba2e
SW
872 if (map->format.format_write) {
873 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
874 (val_endian != REGMAP_ENDIAN_BIG))
875 goto err_map;
67921a1a 876 map->use_single_write = true;
141eba2e 877 }
7a647614 878
b83a313b
MB
879 if (!map->format.format_write &&
880 !(map->format.format_reg && map->format.format_val))
881 goto err_map;
882
82159ba8 883 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
b83a313b
MB
884 if (map->work_buf == NULL) {
885 ret = -ENOMEM;
5204f5e3 886 goto err_map;
b83a313b
MB
887 }
888
d2a5884a
AS
889 if (map->format.format_write) {
890 map->defer_caching = false;
07c320dc 891 map->reg_write = _regmap_bus_formatted_write;
d2a5884a
AS
892 } else if (map->format.format_val) {
893 map->defer_caching = true;
07c320dc 894 map->reg_write = _regmap_bus_raw_write;
d2a5884a
AS
895 }
896
897skip_format_initialization:
07c320dc 898
6863ca62 899 map->range_tree = RB_ROOT;
e3549cd0 900 for (i = 0; i < config->num_ranges; i++) {
6863ca62
KG
901 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
902 struct regmap_range_node *new;
903
904 /* Sanity check */
061adc06
MB
905 if (range_cfg->range_max < range_cfg->range_min) {
906 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
907 range_cfg->range_max, range_cfg->range_min);
6863ca62 908 goto err_range;
061adc06
MB
909 }
910
911 if (range_cfg->range_max > map->max_register) {
912 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
913 range_cfg->range_max, map->max_register);
914 goto err_range;
915 }
916
917 if (range_cfg->selector_reg > map->max_register) {
918 dev_err(map->dev,
919 "Invalid range %d: selector out of map\n", i);
920 goto err_range;
921 }
922
923 if (range_cfg->window_len == 0) {
924 dev_err(map->dev, "Invalid range %d: window_len 0\n",
925 i);
926 goto err_range;
927 }
6863ca62
KG
928
929 /* Make sure, that this register range has no selector
930 or data window within its boundary */
e3549cd0 931 for (j = 0; j < config->num_ranges; j++) {
6863ca62
KG
932 unsigned sel_reg = config->ranges[j].selector_reg;
933 unsigned win_min = config->ranges[j].window_start;
934 unsigned win_max = win_min +
935 config->ranges[j].window_len - 1;
936
f161d220
PZ
937 /* Allow data window inside its own virtual range */
938 if (j == i)
939 continue;
940
6863ca62
KG
941 if (range_cfg->range_min <= sel_reg &&
942 sel_reg <= range_cfg->range_max) {
061adc06
MB
943 dev_err(map->dev,
944 "Range %d: selector for %d in window\n",
945 i, j);
6863ca62
KG
946 goto err_range;
947 }
948
949 if (!(win_max < range_cfg->range_min ||
950 win_min > range_cfg->range_max)) {
061adc06
MB
951 dev_err(map->dev,
952 "Range %d: window for %d in window\n",
953 i, j);
6863ca62
KG
954 goto err_range;
955 }
956 }
957
958 new = kzalloc(sizeof(*new), GFP_KERNEL);
959 if (new == NULL) {
960 ret = -ENOMEM;
961 goto err_range;
962 }
963
4b020b3f 964 new->map = map;
d058bb49 965 new->name = range_cfg->name;
6863ca62
KG
966 new->range_min = range_cfg->range_min;
967 new->range_max = range_cfg->range_max;
968 new->selector_reg = range_cfg->selector_reg;
969 new->selector_mask = range_cfg->selector_mask;
970 new->selector_shift = range_cfg->selector_shift;
971 new->window_start = range_cfg->window_start;
972 new->window_len = range_cfg->window_len;
973
53e87f88 974 if (!_regmap_range_add(map, new)) {
061adc06 975 dev_err(map->dev, "Failed to add range %d\n", i);
6863ca62
KG
976 kfree(new);
977 goto err_range;
978 }
979
980 if (map->selector_work_buf == NULL) {
981 map->selector_work_buf =
982 kzalloc(map->format.buf_size, GFP_KERNEL);
983 if (map->selector_work_buf == NULL) {
984 ret = -ENOMEM;
985 goto err_range;
986 }
987 }
988 }
052d2cd1 989
e5e3b8ab 990 ret = regcache_init(map, config);
0ff3e62f 991 if (ret != 0)
6863ca62
KG
992 goto err_range;
993
a7a037c8 994 if (dev) {
6cfec04b
MS
995 ret = regmap_attach_dev(dev, map, config);
996 if (ret != 0)
997 goto err_regcache;
a7a037c8 998 }
72b39f6f 999
b83a313b
MB
1000 return map;
1001
6cfec04b 1002err_regcache:
72b39f6f 1003 regcache_exit(map);
6863ca62
KG
1004err_range:
1005 regmap_range_exit(map);
58072cbf 1006 kfree(map->work_buf);
b83a313b
MB
1007err_map:
1008 kfree(map);
1009err:
1010 return ERR_PTR(ret);
1011}
3cfe7a74 1012EXPORT_SYMBOL_GPL(__regmap_init);
b83a313b 1013
/* devres destructor: @res holds a struct regmap * to tear down. */
static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}
1018
3cfe7a74
NB
1019struct regmap *__devm_regmap_init(struct device *dev,
1020 const struct regmap_bus *bus,
1021 void *bus_context,
1022 const struct regmap_config *config,
1023 struct lock_class_key *lock_key,
1024 const char *lock_name)
c0eb4676
MB
1025{
1026 struct regmap **ptr, *regmap;
1027
1028 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1029 if (!ptr)
1030 return ERR_PTR(-ENOMEM);
1031
3cfe7a74
NB
1032 regmap = __regmap_init(dev, bus, bus_context, config,
1033 lock_key, lock_name);
c0eb4676
MB
1034 if (!IS_ERR(regmap)) {
1035 *ptr = regmap;
1036 devres_add(dev, ptr);
1037 } else {
1038 devres_free(ptr);
1039 }
1040
1041 return regmap;
1042}
3cfe7a74 1043EXPORT_SYMBOL_GPL(__devm_regmap_init);
c0eb4676 1044
67252287
SK
1045static void regmap_field_init(struct regmap_field *rm_field,
1046 struct regmap *regmap, struct reg_field reg_field)
1047{
67252287
SK
1048 rm_field->regmap = regmap;
1049 rm_field->reg = reg_field.reg;
1050 rm_field->shift = reg_field.lsb;
921cc294 1051 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
a0102375
KM
1052 rm_field->id_size = reg_field.id_size;
1053 rm_field->id_offset = reg_field.id_offset;
67252287
SK
1054}
1055
1056/**
1057 * devm_regmap_field_alloc(): Allocate and initialise a register field
1058 * in a register map.
1059 *
1060 * @dev: Device that will be interacted with
1061 * @regmap: regmap bank in which this register field is located.
1062 * @reg_field: Register field with in the bank.
1063 *
1064 * The return value will be an ERR_PTR() on error or a valid pointer
1065 * to a struct regmap_field. The regmap_field will be automatically freed
1066 * by the device management code.
1067 */
1068struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1069 struct regmap *regmap, struct reg_field reg_field)
1070{
1071 struct regmap_field *rm_field = devm_kzalloc(dev,
1072 sizeof(*rm_field), GFP_KERNEL);
1073 if (!rm_field)
1074 return ERR_PTR(-ENOMEM);
1075
1076 regmap_field_init(rm_field, regmap, reg_field);
1077
1078 return rm_field;
1079
1080}
1081EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1082
/**
 * devm_regmap_field_free(): Free a register field allocated using
 * devm_regmap_field_alloc().
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 *
 * Usually drivers need not call this function, as the memory
 * allocated via devm will be freed as per the device-driver
 * life-cycle.
 */
void devm_regmap_field_free(struct device *dev,
	struct regmap_field *field)
{
	devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1098
1099/**
1100 * regmap_field_alloc(): Allocate and initialise a register field
1101 * in a register map.
1102 *
1103 * @regmap: regmap bank in which this register field is located.
1104 * @reg_field: Register field with in the bank.
1105 *
1106 * The return value will be an ERR_PTR() on error or a valid pointer
1107 * to a struct regmap_field. The regmap_field should be freed by the
1108 * user once its finished working with it using regmap_field_free().
1109 */
1110struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1111 struct reg_field reg_field)
1112{
1113 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1114
1115 if (!rm_field)
1116 return ERR_PTR(-ENOMEM);
1117
1118 regmap_field_init(rm_field, regmap, reg_field);
1119
1120 return rm_field;
1121}
1122EXPORT_SYMBOL_GPL(regmap_field_alloc);
1123
/**
 * regmap_field_free(): Free register field allocated using regmap_field_alloc
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
	kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
1134
bf315173
MB
1135/**
1136 * regmap_reinit_cache(): Reinitialise the current register cache
1137 *
1138 * @map: Register map to operate on.
1139 * @config: New configuration. Only the cache data will be used.
1140 *
1141 * Discard any existing register cache for the map and initialize a
1142 * new cache. This can be used to restore the cache to defaults or to
1143 * update the cache configuration to reflect runtime discovery of the
1144 * hardware.
4d879514
DP
1145 *
1146 * No explicit locking is done here, the user needs to ensure that
1147 * this function will not race with other calls to regmap.
bf315173
MB
1148 */
1149int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1150{
bf315173 1151 regcache_exit(map);
a24f64a6 1152 regmap_debugfs_exit(map);
bf315173
MB
1153
1154 map->max_register = config->max_register;
1155 map->writeable_reg = config->writeable_reg;
1156 map->readable_reg = config->readable_reg;
1157 map->volatile_reg = config->volatile_reg;
1158 map->precious_reg = config->precious_reg;
1159 map->cache_type = config->cache_type;
1160
d3c242e1 1161 regmap_debugfs_init(map, config->name);
a24f64a6 1162
421e8d2d
MB
1163 map->cache_bypass = false;
1164 map->cache_only = false;
1165
4d879514 1166 return regcache_init(map, config);
bf315173 1167}
752a6a5f 1168EXPORT_SYMBOL_GPL(regmap_reinit_cache);
bf315173 1169
b83a313b
MB
1170/**
1171 * regmap_exit(): Free a previously allocated register map
1172 */
1173void regmap_exit(struct regmap *map)
1174{
7e09a979
MB
1175 struct regmap_async *async;
1176
5d1729e7 1177 regcache_exit(map);
31244e39 1178 regmap_debugfs_exit(map);
6863ca62 1179 regmap_range_exit(map);
d2a5884a 1180 if (map->bus && map->bus->free_context)
0135bbcc 1181 map->bus->free_context(map->bus_context);
b83a313b 1182 kfree(map->work_buf);
7e09a979
MB
1183 while (!list_empty(&map->async_free)) {
1184 async = list_first_entry_or_null(&map->async_free,
1185 struct regmap_async,
1186 list);
1187 list_del(&async->list);
1188 kfree(async->work_buf);
1189 kfree(async);
1190 }
b83a313b
MB
1191 kfree(map);
1192}
1193EXPORT_SYMBOL_GPL(regmap_exit);
1194
72b39f6f
MB
1195static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1196{
1197 struct regmap **r = res;
1198 if (!r || !*r) {
1199 WARN_ON(!r || !*r);
1200 return 0;
1201 }
1202
1203 /* If the user didn't specify a name match any */
1204 if (data)
1205 return (*r)->name == data;
1206 else
1207 return 1;
1208}
1209
1210/**
1211 * dev_get_regmap(): Obtain the regmap (if any) for a device
1212 *
1213 * @dev: Device to retrieve the map for
1214 * @name: Optional name for the register map, usually NULL.
1215 *
1216 * Returns the regmap for the device if one is present, or NULL. If
1217 * name is specified then it must match the name specified when
1218 * registering the device, if it is NULL then the first regmap found
1219 * will be used. Devices with multiple register maps are very rare,
1220 * generic code should normally not need to specify a name.
1221 */
1222struct regmap *dev_get_regmap(struct device *dev, const char *name)
1223{
1224 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1225 dev_get_regmap_match, (void *)name);
1226
1227 if (!r)
1228 return NULL;
1229 return *r;
1230}
1231EXPORT_SYMBOL_GPL(dev_get_regmap);
1232
8d7d3972
TT
1233/**
1234 * regmap_get_device(): Obtain the device from a regmap
1235 *
1236 * @map: Register map to operate on.
1237 *
1238 * Returns the underlying device that the regmap has been created for.
1239 */
1240struct device *regmap_get_device(struct regmap *map)
1241{
1242 return map->dev;
1243}
fa2fbe4a 1244EXPORT_SYMBOL_GPL(regmap_get_device);
8d7d3972 1245
6863ca62 1246static int _regmap_select_page(struct regmap *map, unsigned int *reg,
98bc7dfd 1247 struct regmap_range_node *range,
6863ca62
KG
1248 unsigned int val_num)
1249{
6863ca62
KG
1250 void *orig_work_buf;
1251 unsigned int win_offset;
1252 unsigned int win_page;
1253 bool page_chg;
1254 int ret;
1255
98bc7dfd
MB
1256 win_offset = (*reg - range->range_min) % range->window_len;
1257 win_page = (*reg - range->range_min) / range->window_len;
6863ca62 1258
98bc7dfd
MB
1259 if (val_num > 1) {
1260 /* Bulk write shouldn't cross range boundary */
1261 if (*reg + val_num - 1 > range->range_max)
1262 return -EINVAL;
6863ca62 1263
98bc7dfd
MB
1264 /* ... or single page boundary */
1265 if (val_num > range->window_len - win_offset)
1266 return -EINVAL;
1267 }
6863ca62 1268
98bc7dfd
MB
1269 /* It is possible to have selector register inside data window.
1270 In that case, selector register is located on every page and
1271 it needs no page switching, when accessed alone. */
1272 if (val_num > 1 ||
1273 range->window_start + win_offset != range->selector_reg) {
1274 /* Use separate work_buf during page switching */
1275 orig_work_buf = map->work_buf;
1276 map->work_buf = map->selector_work_buf;
6863ca62 1277
98bc7dfd
MB
1278 ret = _regmap_update_bits(map, range->selector_reg,
1279 range->selector_mask,
1280 win_page << range->selector_shift,
7ff0589c 1281 &page_chg, false);
632a5b01 1282
98bc7dfd 1283 map->work_buf = orig_work_buf;
6863ca62 1284
0ff3e62f 1285 if (ret != 0)
98bc7dfd 1286 return ret;
6863ca62
KG
1287 }
1288
98bc7dfd
MB
1289 *reg = range->window_start + win_offset;
1290
6863ca62
KG
1291 return 0;
1292}
1293
584de329 1294int _regmap_raw_write(struct regmap *map, unsigned int reg,
0a819809 1295 const void *val, size_t val_len)
b83a313b 1296{
98bc7dfd 1297 struct regmap_range_node *range;
0d509f2b 1298 unsigned long flags;
6f306441 1299 u8 *u8 = map->work_buf;
0d509f2b
MB
1300 void *work_val = map->work_buf + map->format.reg_bytes +
1301 map->format.pad_bytes;
b83a313b
MB
1302 void *buf;
1303 int ret = -ENOTSUPP;
1304 size_t len;
73304781
MB
1305 int i;
1306
f1b5c5c3 1307 WARN_ON(!map->bus);
d2a5884a 1308
73304781
MB
1309 /* Check for unwritable registers before we start */
1310 if (map->writeable_reg)
1311 for (i = 0; i < val_len / map->format.val_bytes; i++)
f01ee60f
SW
1312 if (!map->writeable_reg(map->dev,
1313 reg + (i * map->reg_stride)))
73304781 1314 return -EINVAL;
b83a313b 1315
c9157198
LD
1316 if (!map->cache_bypass && map->format.parse_val) {
1317 unsigned int ival;
1318 int val_bytes = map->format.val_bytes;
1319 for (i = 0; i < val_len / val_bytes; i++) {
5a08d156 1320 ival = map->format.parse_val(val + (i * val_bytes));
f01ee60f
SW
1321 ret = regcache_write(map, reg + (i * map->reg_stride),
1322 ival);
c9157198
LD
1323 if (ret) {
1324 dev_err(map->dev,
6d04b8ac 1325 "Error in caching of register: %x ret: %d\n",
c9157198
LD
1326 reg + i, ret);
1327 return ret;
1328 }
1329 }
1330 if (map->cache_only) {
1331 map->cache_dirty = true;
1332 return 0;
1333 }
1334 }
1335
98bc7dfd
MB
1336 range = _regmap_range_lookup(map, reg);
1337 if (range) {
8a2ceac6
MB
1338 int val_num = val_len / map->format.val_bytes;
1339 int win_offset = (reg - range->range_min) % range->window_len;
1340 int win_residue = range->window_len - win_offset;
1341
1342 /* If the write goes beyond the end of the window split it */
1343 while (val_num > win_residue) {
1a61cfe3 1344 dev_dbg(map->dev, "Writing window %d/%zu\n",
8a2ceac6
MB
1345 win_residue, val_len / map->format.val_bytes);
1346 ret = _regmap_raw_write(map, reg, val, win_residue *
0a819809 1347 map->format.val_bytes);
8a2ceac6
MB
1348 if (ret != 0)
1349 return ret;
1350
1351 reg += win_residue;
1352 val_num -= win_residue;
1353 val += win_residue * map->format.val_bytes;
1354 val_len -= win_residue * map->format.val_bytes;
1355
1356 win_offset = (reg - range->range_min) %
1357 range->window_len;
1358 win_residue = range->window_len - win_offset;
1359 }
1360
1361 ret = _regmap_select_page(map, &reg, range, val_num);
0ff3e62f 1362 if (ret != 0)
98bc7dfd
MB
1363 return ret;
1364 }
6863ca62 1365
d939fb9a 1366 map->format.format_reg(map->work_buf, reg, map->reg_shift);
b83a313b 1367
6f306441
LPC
1368 u8[0] |= map->write_flag_mask;
1369
651e013e
MB
1370 /*
1371 * Essentially all I/O mechanisms will be faster with a single
1372 * buffer to write. Since register syncs often generate raw
1373 * writes of single registers optimise that case.
1374 */
1375 if (val != work_val && val_len == map->format.val_bytes) {
1376 memcpy(work_val, val, map->format.val_bytes);
1377 val = work_val;
1378 }
1379
0a819809 1380 if (map->async && map->bus->async_write) {
7e09a979 1381 struct regmap_async *async;
0d509f2b 1382
c6b570d9 1383 trace_regmap_async_write_start(map, reg, val_len);
fe7d4ccd 1384
7e09a979
MB
1385 spin_lock_irqsave(&map->async_lock, flags);
1386 async = list_first_entry_or_null(&map->async_free,
1387 struct regmap_async,
1388 list);
1389 if (async)
1390 list_del(&async->list);
1391 spin_unlock_irqrestore(&map->async_lock, flags);
1392
1393 if (!async) {
1394 async = map->bus->async_alloc();
1395 if (!async)
1396 return -ENOMEM;
1397
1398 async->work_buf = kzalloc(map->format.buf_size,
1399 GFP_KERNEL | GFP_DMA);
1400 if (!async->work_buf) {
1401 kfree(async);
1402 return -ENOMEM;
1403 }
0d509f2b
MB
1404 }
1405
0d509f2b
MB
1406 async->map = map;
1407
1408 /* If the caller supplied the value we can use it safely. */
1409 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1410 map->format.reg_bytes + map->format.val_bytes);
0d509f2b
MB
1411
1412 spin_lock_irqsave(&map->async_lock, flags);
1413 list_add_tail(&async->list, &map->async_list);
1414 spin_unlock_irqrestore(&map->async_lock, flags);
1415
04c50ccf
MB
1416 if (val != work_val)
1417 ret = map->bus->async_write(map->bus_context,
1418 async->work_buf,
1419 map->format.reg_bytes +
1420 map->format.pad_bytes,
1421 val, val_len, async);
1422 else
1423 ret = map->bus->async_write(map->bus_context,
1424 async->work_buf,
1425 map->format.reg_bytes +
1426 map->format.pad_bytes +
1427 val_len, NULL, 0, async);
0d509f2b
MB
1428
1429 if (ret != 0) {
1430 dev_err(map->dev, "Failed to schedule write: %d\n",
1431 ret);
1432
1433 spin_lock_irqsave(&map->async_lock, flags);
7e09a979 1434 list_move(&async->list, &map->async_free);
0d509f2b 1435 spin_unlock_irqrestore(&map->async_lock, flags);
0d509f2b 1436 }
f951b658
MB
1437
1438 return ret;
0d509f2b
MB
1439 }
1440
c6b570d9 1441 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
fb2736bb 1442
2547e201
MB
1443 /* If we're doing a single register write we can probably just
1444 * send the work_buf directly, otherwise try to do a gather
1445 * write.
1446 */
0d509f2b 1447 if (val == work_val)
0135bbcc 1448 ret = map->bus->write(map->bus_context, map->work_buf,
82159ba8
MB
1449 map->format.reg_bytes +
1450 map->format.pad_bytes +
1451 val_len);
2547e201 1452 else if (map->bus->gather_write)
0135bbcc 1453 ret = map->bus->gather_write(map->bus_context, map->work_buf,
82159ba8
MB
1454 map->format.reg_bytes +
1455 map->format.pad_bytes,
b83a313b
MB
1456 val, val_len);
1457
2547e201 1458 /* If that didn't work fall back on linearising by hand. */
b83a313b 1459 if (ret == -ENOTSUPP) {
82159ba8
MB
1460 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1461 buf = kzalloc(len, GFP_KERNEL);
b83a313b
MB
1462 if (!buf)
1463 return -ENOMEM;
1464
1465 memcpy(buf, map->work_buf, map->format.reg_bytes);
82159ba8
MB
1466 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1467 val, val_len);
0135bbcc 1468 ret = map->bus->write(map->bus_context, buf, len);
b83a313b
MB
1469
1470 kfree(buf);
1471 }
1472
c6b570d9 1473 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
fb2736bb 1474
b83a313b
MB
1475 return ret;
1476}
1477
221ad7f2
MB
1478/**
1479 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1480 *
1481 * @map: Map to check.
1482 */
1483bool regmap_can_raw_write(struct regmap *map)
1484{
07ea400e
MP
1485 return map->bus && map->bus->write && map->format.format_val &&
1486 map->format.format_reg;
221ad7f2
MB
1487}
1488EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1489
f50c9eb4
MP
1490/**
1491 * regmap_get_raw_read_max - Get the maximum size we can read
1492 *
1493 * @map: Map to check.
1494 */
1495size_t regmap_get_raw_read_max(struct regmap *map)
1496{
1497 return map->max_raw_read;
1498}
1499EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1500
1501/**
1502 * regmap_get_raw_write_max - Get the maximum size we can read
1503 *
1504 * @map: Map to check.
1505 */
1506size_t regmap_get_raw_write_max(struct regmap *map)
1507{
1508 return map->max_raw_write;
1509}
1510EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1511
07c320dc
AS
1512static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1513 unsigned int val)
1514{
1515 int ret;
1516 struct regmap_range_node *range;
1517 struct regmap *map = context;
1518
f1b5c5c3 1519 WARN_ON(!map->bus || !map->format.format_write);
07c320dc
AS
1520
1521 range = _regmap_range_lookup(map, reg);
1522 if (range) {
1523 ret = _regmap_select_page(map, &reg, range, 1);
1524 if (ret != 0)
1525 return ret;
1526 }
1527
1528 map->format.format_write(map, reg, val);
1529
c6b570d9 1530 trace_regmap_hw_write_start(map, reg, 1);
07c320dc
AS
1531
1532 ret = map->bus->write(map->bus_context, map->work_buf,
1533 map->format.buf_size);
1534
c6b570d9 1535 trace_regmap_hw_write_done(map, reg, 1);
07c320dc
AS
1536
1537 return ret;
1538}
1539
3ac17037
BB
1540static int _regmap_bus_reg_write(void *context, unsigned int reg,
1541 unsigned int val)
1542{
1543 struct regmap *map = context;
1544
1545 return map->bus->reg_write(map->bus_context, reg, val);
1546}
1547
07c320dc
AS
1548static int _regmap_bus_raw_write(void *context, unsigned int reg,
1549 unsigned int val)
1550{
1551 struct regmap *map = context;
1552
f1b5c5c3 1553 WARN_ON(!map->bus || !map->format.format_val);
07c320dc
AS
1554
1555 map->format.format_val(map->work_buf + map->format.reg_bytes
1556 + map->format.pad_bytes, val, 0);
1557 return _regmap_raw_write(map, reg,
1558 map->work_buf +
1559 map->format.reg_bytes +
1560 map->format.pad_bytes,
0a819809 1561 map->format.val_bytes);
07c320dc
AS
1562}
1563
d2a5884a
AS
1564static inline void *_regmap_map_get_context(struct regmap *map)
1565{
1566 return (map->bus) ? map : map->bus_context;
1567}
1568
4d2dc095
DP
1569int _regmap_write(struct regmap *map, unsigned int reg,
1570 unsigned int val)
b83a313b 1571{
fb2736bb 1572 int ret;
d2a5884a 1573 void *context = _regmap_map_get_context(map);
b83a313b 1574
515f2261
IN
1575 if (!regmap_writeable(map, reg))
1576 return -EIO;
1577
d2a5884a 1578 if (!map->cache_bypass && !map->defer_caching) {
5d1729e7
DP
1579 ret = regcache_write(map, reg, val);
1580 if (ret != 0)
1581 return ret;
8ae0d7e8
MB
1582 if (map->cache_only) {
1583 map->cache_dirty = true;
5d1729e7 1584 return 0;
8ae0d7e8 1585 }
5d1729e7
DP
1586 }
1587
1044c180 1588#ifdef LOG_DEVICE
5336be84 1589 if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1044c180
MB
1590 dev_info(map->dev, "%x <= %x\n", reg, val);
1591#endif
1592
c6b570d9 1593 trace_regmap_reg_write(map, reg, val);
fb2736bb 1594
d2a5884a 1595 return map->reg_write(context, reg, val);
b83a313b
MB
1596}
1597
1598/**
1599 * regmap_write(): Write a value to a single register
1600 *
1601 * @map: Register map to write to
1602 * @reg: Register to write to
1603 * @val: Value to be written
1604 *
1605 * A value of zero will be returned on success, a negative errno will
1606 * be returned in error cases.
1607 */
1608int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1609{
1610 int ret;
1611
fcac0233 1612 if (!IS_ALIGNED(reg, map->reg_stride))
f01ee60f
SW
1613 return -EINVAL;
1614
0d4529c5 1615 map->lock(map->lock_arg);
b83a313b
MB
1616
1617 ret = _regmap_write(map, reg, val);
1618
0d4529c5 1619 map->unlock(map->lock_arg);
b83a313b
MB
1620
1621 return ret;
1622}
1623EXPORT_SYMBOL_GPL(regmap_write);
1624
915f441b
MB
1625/**
1626 * regmap_write_async(): Write a value to a single register asynchronously
1627 *
1628 * @map: Register map to write to
1629 * @reg: Register to write to
1630 * @val: Value to be written
1631 *
1632 * A value of zero will be returned on success, a negative errno will
1633 * be returned in error cases.
1634 */
1635int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1636{
1637 int ret;
1638
fcac0233 1639 if (!IS_ALIGNED(reg, map->reg_stride))
915f441b
MB
1640 return -EINVAL;
1641
1642 map->lock(map->lock_arg);
1643
1644 map->async = true;
1645
1646 ret = _regmap_write(map, reg, val);
1647
1648 map->async = false;
1649
1650 map->unlock(map->lock_arg);
1651
1652 return ret;
1653}
1654EXPORT_SYMBOL_GPL(regmap_write_async);
1655
b83a313b
MB
1656/**
1657 * regmap_raw_write(): Write raw values to one or more registers
1658 *
1659 * @map: Register map to write to
1660 * @reg: Initial register to write to
1661 * @val: Block of data to be written, laid out for direct transmission to the
1662 * device
1663 * @val_len: Length of data pointed to by val.
1664 *
1665 * This function is intended to be used for things like firmware
1666 * download where a large block of data needs to be transferred to the
1667 * device. No formatting will be done on the data provided.
1668 *
1669 * A value of zero will be returned on success, a negative errno will
1670 * be returned in error cases.
1671 */
1672int regmap_raw_write(struct regmap *map, unsigned int reg,
1673 const void *val, size_t val_len)
1674{
1675 int ret;
1676
221ad7f2 1677 if (!regmap_can_raw_write(map))
d2a5884a 1678 return -EINVAL;
851960ba
SW
1679 if (val_len % map->format.val_bytes)
1680 return -EINVAL;
c335931e
MP
1681 if (map->max_raw_write && map->max_raw_write > val_len)
1682 return -E2BIG;
851960ba 1683
0d4529c5 1684 map->lock(map->lock_arg);
b83a313b 1685
0a819809 1686 ret = _regmap_raw_write(map, reg, val, val_len);
b83a313b 1687
0d4529c5 1688 map->unlock(map->lock_arg);
b83a313b
MB
1689
1690 return ret;
1691}
1692EXPORT_SYMBOL_GPL(regmap_raw_write);
1693
67252287
SK
1694/**
1695 * regmap_field_write(): Write a value to a single register field
1696 *
1697 * @field: Register field to write to
1698 * @val: Value to be written
1699 *
1700 * A value of zero will be returned on success, a negative errno will
1701 * be returned in error cases.
1702 */
1703int regmap_field_write(struct regmap_field *field, unsigned int val)
1704{
1705 return regmap_update_bits(field->regmap, field->reg,
1706 field->mask, val << field->shift);
1707}
1708EXPORT_SYMBOL_GPL(regmap_field_write);
1709
fdf20029
KM
1710/**
1711 * regmap_field_update_bits(): Perform a read/modify/write cycle
1712 * on the register field
1713 *
1714 * @field: Register field to write to
1715 * @mask: Bitmask to change
1716 * @val: Value to be written
1717 *
1718 * A value of zero will be returned on success, a negative errno will
1719 * be returned in error cases.
1720 */
1721int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
1722{
1723 mask = (mask << field->shift) & field->mask;
1724
1725 return regmap_update_bits(field->regmap, field->reg,
1726 mask, val << field->shift);
1727}
1728EXPORT_SYMBOL_GPL(regmap_field_update_bits);
1729
a0102375
KM
1730/**
1731 * regmap_fields_write(): Write a value to a single register field with port ID
1732 *
1733 * @field: Register field to write to
1734 * @id: port ID
1735 * @val: Value to be written
1736 *
1737 * A value of zero will be returned on success, a negative errno will
1738 * be returned in error cases.
1739 */
1740int regmap_fields_write(struct regmap_field *field, unsigned int id,
1741 unsigned int val)
1742{
1743 if (id >= field->id_size)
1744 return -EINVAL;
1745
1746 return regmap_update_bits(field->regmap,
1747 field->reg + (field->id_offset * id),
1748 field->mask, val << field->shift);
1749}
1750EXPORT_SYMBOL_GPL(regmap_fields_write);
1751
e874e6c7
KM
1752int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
1753 unsigned int val)
1754{
1755 if (id >= field->id_size)
1756 return -EINVAL;
1757
1758 return regmap_write_bits(field->regmap,
1759 field->reg + (field->id_offset * id),
1760 field->mask, val << field->shift);
1761}
1762EXPORT_SYMBOL_GPL(regmap_fields_force_write);
1763
a0102375
KM
1764/**
1765 * regmap_fields_update_bits(): Perform a read/modify/write cycle
1766 * on the register field
1767 *
1768 * @field: Register field to write to
1769 * @id: port ID
1770 * @mask: Bitmask to change
1771 * @val: Value to be written
1772 *
1773 * A value of zero will be returned on success, a negative errno will
1774 * be returned in error cases.
1775 */
1776int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
1777 unsigned int mask, unsigned int val)
1778{
1779 if (id >= field->id_size)
1780 return -EINVAL;
1781
1782 mask = (mask << field->shift) & field->mask;
1783
1784 return regmap_update_bits(field->regmap,
1785 field->reg + (field->id_offset * id),
1786 mask, val << field->shift);
1787}
1788EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
1789
8eaeb219
LD
1790/*
1791 * regmap_bulk_write(): Write multiple registers to the device
1792 *
1793 * @map: Register map to write to
1794 * @reg: First register to be write from
1795 * @val: Block of data to be written, in native register size for device
1796 * @val_count: Number of registers to write
1797 *
1798 * This function is intended to be used for writing a large block of
31b35e9e 1799 * data to the device either in single transfer or multiple transfer.
8eaeb219
LD
1800 *
1801 * A value of zero will be returned on success, a negative errno will
1802 * be returned in error cases.
1803 */
1804int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1805 size_t val_count)
1806{
1807 int ret = 0, i;
1808 size_t val_bytes = map->format.val_bytes;
adaac459 1809 size_t total_size = val_bytes * val_count;
8eaeb219 1810
f4298360 1811 if (map->bus && !map->format.parse_inplace)
8eaeb219 1812 return -EINVAL;
fcac0233 1813 if (!IS_ALIGNED(reg, map->reg_stride))
f01ee60f 1814 return -EINVAL;
8eaeb219 1815
f4298360
SB
1816 /*
1817 * Some devices don't support bulk write, for
c594b7f2
MP
1818 * them we have a series of single write operations in the first two if
1819 * blocks.
1820 *
1821 * The first if block is used for memory mapped io. It does not allow
1822 * val_bytes of 3 for example.
1823 * The second one is used for busses which do not have this limitation
1824 * and can write arbitrary value lengths.
f4298360 1825 */
c594b7f2 1826 if (!map->bus) {
4999e962 1827 map->lock(map->lock_arg);
f4298360
SB
1828 for (i = 0; i < val_count; i++) {
1829 unsigned int ival;
1830
1831 switch (val_bytes) {
1832 case 1:
1833 ival = *(u8 *)(val + (i * val_bytes));
1834 break;
1835 case 2:
1836 ival = *(u16 *)(val + (i * val_bytes));
1837 break;
1838 case 4:
1839 ival = *(u32 *)(val + (i * val_bytes));
1840 break;
1841#ifdef CONFIG_64BIT
1842 case 8:
1843 ival = *(u64 *)(val + (i * val_bytes));
1844 break;
1845#endif
1846 default:
1847 ret = -EINVAL;
1848 goto out;
1849 }
8eaeb219 1850
f4298360
SB
1851 ret = _regmap_write(map, reg + (i * map->reg_stride),
1852 ival);
1853 if (ret != 0)
1854 goto out;
1855 }
4999e962
TI
1856out:
1857 map->unlock(map->lock_arg);
adaac459
MP
1858 } else if (map->use_single_write ||
1859 (map->max_raw_write && map->max_raw_write < total_size)) {
1860 int chunk_stride = map->reg_stride;
1861 size_t chunk_size = val_bytes;
1862 size_t chunk_count = val_count;
1863
1864 if (!map->use_single_write) {
1865 chunk_size = map->max_raw_write;
1866 if (chunk_size % val_bytes)
1867 chunk_size -= chunk_size % val_bytes;
1868 chunk_count = total_size / chunk_size;
1869 chunk_stride *= chunk_size / val_bytes;
1870 }
1871
c594b7f2 1872 map->lock(map->lock_arg);
adaac459
MP
1873 /* Write as many bytes as possible with chunk_size */
1874 for (i = 0; i < chunk_count; i++) {
c594b7f2 1875 ret = _regmap_raw_write(map,
adaac459
MP
1876 reg + (i * chunk_stride),
1877 val + (i * chunk_size),
1878 chunk_size);
c594b7f2
MP
1879 if (ret)
1880 break;
1881 }
adaac459
MP
1882
1883 /* Write remaining bytes */
1884 if (!ret && chunk_size * i < total_size) {
1885 ret = _regmap_raw_write(map, reg + (i * chunk_stride),
1886 val + (i * chunk_size),
1887 total_size - i * chunk_size);
1888 }
c594b7f2 1889 map->unlock(map->lock_arg);
8eaeb219 1890 } else {
f4298360
SB
1891 void *wval;
1892
d6b41cb0
XL
1893 if (!val_count)
1894 return -EINVAL;
1895
b4a21fc2 1896 wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
8eaeb219 1897 if (!wval) {
8eaeb219 1898 dev_err(map->dev, "Error in memory allocation\n");
4999e962 1899 return -ENOMEM;
8eaeb219
LD
1900 }
1901 for (i = 0; i < val_count * val_bytes; i += val_bytes)
8a819ff8 1902 map->format.parse_inplace(wval + i);
f4298360 1903
4999e962 1904 map->lock(map->lock_arg);
0a819809 1905 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
4999e962 1906 map->unlock(map->lock_arg);
8eaeb219 1907
8eaeb219 1908 kfree(wval);
f4298360 1909 }
8eaeb219
LD
1910 return ret;
1911}
1912EXPORT_SYMBOL_GPL(regmap_bulk_write);
1913
e894c3f4
OAO
1914/*
1915 * _regmap_raw_multi_reg_write()
1916 *
1917 * the (register,newvalue) pairs in regs have not been formatted, but
1918 * they are all in the same page and have been changed to being page
b486afbd 1919 * relative. The page register has been written if that was necessary.
e894c3f4
OAO
1920 */
1921static int _regmap_raw_multi_reg_write(struct regmap *map,
8019ff6c 1922 const struct reg_sequence *regs,
e894c3f4
OAO
1923 size_t num_regs)
1924{
1925 int ret;
1926 void *buf;
1927 int i;
1928 u8 *u8;
1929 size_t val_bytes = map->format.val_bytes;
1930 size_t reg_bytes = map->format.reg_bytes;
1931 size_t pad_bytes = map->format.pad_bytes;
1932 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
1933 size_t len = pair_size * num_regs;
1934
f5727cd3
XL
1935 if (!len)
1936 return -EINVAL;
1937
e894c3f4
OAO
1938 buf = kzalloc(len, GFP_KERNEL);
1939 if (!buf)
1940 return -ENOMEM;
1941
1942 /* We have to linearise by hand. */
1943
1944 u8 = buf;
1945
1946 for (i = 0; i < num_regs; i++) {
2f9b660b
MP
1947 unsigned int reg = regs[i].reg;
1948 unsigned int val = regs[i].def;
c6b570d9 1949 trace_regmap_hw_write_start(map, reg, 1);
e894c3f4
OAO
1950 map->format.format_reg(u8, reg, map->reg_shift);
1951 u8 += reg_bytes + pad_bytes;
1952 map->format.format_val(u8, val, 0);
1953 u8 += val_bytes;
1954 }
1955 u8 = buf;
1956 *u8 |= map->write_flag_mask;
1957
1958 ret = map->bus->write(map->bus_context, buf, len);
1959
1960 kfree(buf);
1961
1962 for (i = 0; i < num_regs; i++) {
1963 int reg = regs[i].reg;
c6b570d9 1964 trace_regmap_hw_write_done(map, reg, 1);
e894c3f4
OAO
1965 }
1966 return ret;
1967}
1968
1969static unsigned int _regmap_register_page(struct regmap *map,
1970 unsigned int reg,
1971 struct regmap_range_node *range)
1972{
1973 unsigned int win_page = (reg - range->range_min) / range->window_len;
1974
1975 return win_page;
1976}
1977
1978static int _regmap_range_multi_paged_reg_write(struct regmap *map,
8019ff6c 1979 struct reg_sequence *regs,
e894c3f4
OAO
1980 size_t num_regs)
1981{
1982 int ret;
1983 int i, n;
8019ff6c 1984 struct reg_sequence *base;
b48d1398 1985 unsigned int this_page = 0;
2de9d600 1986 unsigned int page_change = 0;
e894c3f4
OAO
1987 /*
1988 * the set of registers are not neccessarily in order, but
1989 * since the order of write must be preserved this algorithm
2de9d600
NP
1990 * chops the set each time the page changes. This also applies
1991 * if there is a delay required at any point in the sequence.
e894c3f4
OAO
1992 */
1993 base = regs;
1994 for (i = 0, n = 0; i < num_regs; i++, n++) {
1995 unsigned int reg = regs[i].reg;
1996 struct regmap_range_node *range;
1997
1998 range = _regmap_range_lookup(map, reg);
1999 if (range) {
2000 unsigned int win_page = _regmap_register_page(map, reg,
2001 range);
2002
2003 if (i == 0)
2004 this_page = win_page;
2005 if (win_page != this_page) {
2006 this_page = win_page;
2de9d600
NP
2007 page_change = 1;
2008 }
2009 }
2010
2011 /* If we have both a page change and a delay make sure to
2012 * write the regs and apply the delay before we change the
2013 * page.
2014 */
2015
2016 if (page_change || regs[i].delay_us) {
2017
2018 /* For situations where the first write requires
2019 * a delay we need to make sure we don't call
2020 * raw_multi_reg_write with n=0
2021 * This can't occur with page breaks as we
2022 * never write on the first iteration
2023 */
2024 if (regs[i].delay_us && i == 0)
2025 n = 1;
2026
e894c3f4
OAO
2027 ret = _regmap_raw_multi_reg_write(map, base, n);
2028 if (ret != 0)
2029 return ret;
2de9d600
NP
2030
2031 if (regs[i].delay_us)
2032 udelay(regs[i].delay_us);
2033
e894c3f4
OAO
2034 base += n;
2035 n = 0;
2de9d600
NP
2036
2037 if (page_change) {
2038 ret = _regmap_select_page(map,
2039 &base[n].reg,
2040 range, 1);
2041 if (ret != 0)
2042 return ret;
2043
2044 page_change = 0;
2045 }
2046
e894c3f4 2047 }
2de9d600 2048
e894c3f4
OAO
2049 }
2050 if (n > 0)
2051 return _regmap_raw_multi_reg_write(map, base, n);
2052 return 0;
2053}
2054
1d5b40bc 2055static int _regmap_multi_reg_write(struct regmap *map,
8019ff6c 2056 const struct reg_sequence *regs,
e894c3f4 2057 size_t num_regs)
1d5b40bc 2058{
e894c3f4
OAO
2059 int i;
2060 int ret;
2061
2062 if (!map->can_multi_write) {
2063 for (i = 0; i < num_regs; i++) {
2064 ret = _regmap_write(map, regs[i].reg, regs[i].def);
2065 if (ret != 0)
2066 return ret;
2de9d600
NP
2067
2068 if (regs[i].delay_us)
2069 udelay(regs[i].delay_us);
e894c3f4
OAO
2070 }
2071 return 0;
2072 }
2073
2074 if (!map->format.parse_inplace)
2075 return -EINVAL;
2076
2077 if (map->writeable_reg)
2078 for (i = 0; i < num_regs; i++) {
2079 int reg = regs[i].reg;
2080 if (!map->writeable_reg(map->dev, reg))
2081 return -EINVAL;
fcac0233 2082 if (!IS_ALIGNED(reg, map->reg_stride))
e894c3f4
OAO
2083 return -EINVAL;
2084 }
2085
2086 if (!map->cache_bypass) {
2087 for (i = 0; i < num_regs; i++) {
2088 unsigned int val = regs[i].def;
2089 unsigned int reg = regs[i].reg;
2090 ret = regcache_write(map, reg, val);
2091 if (ret) {
2092 dev_err(map->dev,
2093 "Error in caching of register: %x ret: %d\n",
2094 reg, ret);
2095 return ret;
2096 }
2097 }
2098 if (map->cache_only) {
2099 map->cache_dirty = true;
2100 return 0;
2101 }
2102 }
2103
2104 WARN_ON(!map->bus);
1d5b40bc
CK
2105
2106 for (i = 0; i < num_regs; i++) {
e894c3f4
OAO
2107 unsigned int reg = regs[i].reg;
2108 struct regmap_range_node *range;
2de9d600
NP
2109
2110 /* Coalesce all the writes between a page break or a delay
2111 * in a sequence
2112 */
e894c3f4 2113 range = _regmap_range_lookup(map, reg);
2de9d600 2114 if (range || regs[i].delay_us) {
8019ff6c
NP
2115 size_t len = sizeof(struct reg_sequence)*num_regs;
2116 struct reg_sequence *base = kmemdup(regs, len,
e894c3f4
OAO
2117 GFP_KERNEL);
2118 if (!base)
2119 return -ENOMEM;
2120 ret = _regmap_range_multi_paged_reg_write(map, base,
2121 num_regs);
2122 kfree(base);
2123
1d5b40bc
CK
2124 return ret;
2125 }
2126 }
e894c3f4 2127 return _regmap_raw_multi_reg_write(map, regs, num_regs);
1d5b40bc
CK
2128}
2129
e33fabd3
AO
/**
 * regmap_multi_reg_write(): Write multiple registers to the device
 *
 * where the set of register,value pairs are supplied in any order,
 * possibly not all in a single range.
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * The 'normal' block write mode will ultimately send data on the
 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
 * addressed. However, this alternative block multi write mode will send
 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
 * must of course support the mode.
 *
 * A value of zero will be returned on success, a negative errno will be
 * returned in error cases.
 */
int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
			   int num_regs)
{
	int ret;

	/* Serialise against other register accesses on this map */
	map->lock(map->lock_arg);

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2163
1d5b40bc
CK
/**
 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
 * device but not the cache
 *
 * where the set of register are supplied in any order
 *
 * @map: Register map to write to
 * @regs: Array of structures containing register,value to be written
 * @num_regs: Number of registers to write
 *
 * This function is intended to be used for writing a large block of data
 * atomically to the device in single transfer for those I2C client devices
 * that implement this alternative block write mode.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_multi_reg_write_bypassed(struct regmap *map,
				    const struct reg_sequence *regs,
				    int num_regs)
{
	int ret;
	bool bypass;

	map->lock(map->lock_arg);

	/*
	 * Force the writes to reach the hardware directly, restoring
	 * the previous bypass state afterwards.
	 */
	bypass = map->cache_bypass;
	map->cache_bypass = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
e33fabd3 2202
0d509f2b
MB
/**
 * regmap_raw_write_async(): Write raw values to one or more registers
 * asynchronously
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device. Must be valid until regmap_async_complete() is called.
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device. No formatting will be done on the data provided.
 *
 * If supported by the underlying bus the write will be scheduled
 * asynchronously, helping maximise I/O speed on higher speed buses
 * like SPI. regmap_async_complete() can be called to ensure that all
 * asynchronous writes have been completed.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write_async(struct regmap *map, unsigned int reg,
			   const void *val, size_t val_len)
{
	int ret;

	/* Raw data must be a whole number of register-sized values */
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	map->lock(map->lock_arg);

	/* Flag the write path to use the bus's async machinery */
	map->async = true;

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->async = false;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2248
b83a313b
MB
/*
 * Raw block read from the hardware: performs any page selection needed
 * for the target register, formats the register address (plus read flag
 * mask) into the work buffer and issues a single bus read into val.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	u8 *u8 = map->work_buf;
	int ret;

	WARN_ON(!map->bus);

	/* Switch to the page containing reg if it lives in a paged range */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	/*
	 * Some buses or devices flag reads by setting the high bits in the
	 * register address; since it's always the high bits for all
	 * current formats we can do this here rather than in
	 * formatting. This may break if we get interesting formats.
	 */
	u8[0] |= map->read_flag_mask;

	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
2286
3ac17037
BB
2287static int _regmap_bus_reg_read(void *context, unsigned int reg,
2288 unsigned int *val)
2289{
2290 struct regmap *map = context;
2291
2292 return map->bus->reg_read(map->bus_context, reg, val);
2293}
2294
ad278406
AS
2295static int _regmap_bus_read(void *context, unsigned int reg,
2296 unsigned int *val)
2297{
2298 int ret;
2299 struct regmap *map = context;
2300
2301 if (!map->format.parse_val)
2302 return -EINVAL;
2303
2304 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
2305 if (ret == 0)
2306 *val = map->format.parse_val(map->work_buf);
2307
2308 return ret;
2309}
2310
b83a313b
MB
/*
 * Core single-register read: satisfy the read from the register cache
 * where possible, otherwise fall back to the map's reg_read accessor
 * and refresh the cache with the value obtained.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* Try the register cache first */
	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	/* Cache-only maps must not touch the hardware */
	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		/* Keep the cache coherent with the value just read */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2344
2345/**
2346 * regmap_read(): Read a value from a single register
2347 *
0093380c 2348 * @map: Register map to read from
b83a313b
MB
2349 * @reg: Register to be read from
2350 * @val: Pointer to store read value
2351 *
2352 * A value of zero will be returned on success, a negative errno will
2353 * be returned in error cases.
2354 */
2355int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2356{
2357 int ret;
2358
fcac0233 2359 if (!IS_ALIGNED(reg, map->reg_stride))
f01ee60f
SW
2360 return -EINVAL;
2361
0d4529c5 2362 map->lock(map->lock_arg);
b83a313b
MB
2363
2364 ret = _regmap_read(map, reg, val);
2365
0d4529c5 2366 map->unlock(map->lock_arg);
b83a313b
MB
2367
2368 return ret;
2369}
2370EXPORT_SYMBOL_GPL(regmap_read);
2371
/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	/* Raw access needs a bus and whole register values at valid strides */
	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		/* Respect the bus's maximum raw transfer size, if any */
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + (i * map->reg_stride),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
2436
67252287
SK
2437/**
2438 * regmap_field_read(): Read a value to a single register field
2439 *
2440 * @field: Register field to read from
2441 * @val: Pointer to store read value
2442 *
2443 * A value of zero will be returned on success, a negative errno will
2444 * be returned in error cases.
2445 */
2446int regmap_field_read(struct regmap_field *field, unsigned int *val)
2447{
2448 int ret;
2449 unsigned int reg_val;
2450 ret = regmap_read(field->regmap, field->reg, &reg_val);
2451 if (ret != 0)
2452 return ret;
2453
2454 reg_val &= field->mask;
2455 reg_val >>= field->shift;
2456 *val = reg_val;
2457
2458 return ret;
2459}
2460EXPORT_SYMBOL_GPL(regmap_field_read);
2461
a0102375
KM
2462/**
2463 * regmap_fields_read(): Read a value to a single register field with port ID
2464 *
2465 * @field: Register field to read from
2466 * @id: port ID
2467 * @val: Pointer to store read value
2468 *
2469 * A value of zero will be returned on success, a negative errno will
2470 * be returned in error cases.
2471 */
2472int regmap_fields_read(struct regmap_field *field, unsigned int id,
2473 unsigned int *val)
2474{
2475 int ret;
2476 unsigned int reg_val;
2477
2478 if (id >= field->id_size)
2479 return -EINVAL;
2480
2481 ret = regmap_read(field->regmap,
2482 field->reg + (field->id_offset * id),
2483 &reg_val);
2484 if (ret != 0)
2485 return ret;
2486
2487 reg_val &= field->mask;
2488 reg_val >>= field->shift;
2489 *val = reg_val;
2490
2491 return ret;
2492}
2493EXPORT_SYMBOL_GPL(regmap_fields_read);
2494
b83a313b
MB
/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices do not support bulk read, for
		 * them we have a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			/* One raw transfer covers the whole request */
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads, for them we have a series
			 * of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			/*
			 * Size each chunk to the largest whole number of
			 * values that fits within max_raw_read.
			 */
			if (!map->use_single_read) {
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		/* Convert the raw buffer to native-endian values in place */
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + (i * map->reg_stride),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes), ival, 0);
			} else {
				/* Devices providing read and write
				 * operations can use the bulk I/O
				 * functions if they define a val_bytes,
				 * we assume that the values are native
				 * endian.
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
2619
018690d3
MB
/*
 * Read/modify/write helper.  Uses the bus's reg_update_bits() op for
 * volatile registers where the bus provides one, otherwise performs
 * the cycle by hand via _regmap_read()/_regmap_write().
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (change)
		*change = false;

	if (regmap_volatile(map, reg) && map->reg_update_bits) {
		/* Let the hardware/bus do the update for us */
		ret = map->reg_update_bits(map->bus_context, reg, mask, val);
		if (ret == 0 && change)
			*change = true;
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		/* Skip the write when nothing changed, unless forced */
		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}
018690d3
MB
2651
2652/**
2653 * regmap_update_bits: Perform a read/modify/write cycle on the register map
2654 *
2655 * @map: Register map to update
2656 * @reg: Register to update
2657 * @mask: Bitmask to change
2658 * @val: New value for bitmask
2659 *
2660 * Returns zero for success, a negative number on error.
2661 */
2662int regmap_update_bits(struct regmap *map, unsigned int reg,
2663 unsigned int mask, unsigned int val)
2664{
fc3ebd78
KG
2665 int ret;
2666
0d4529c5 2667 map->lock(map->lock_arg);
7ff0589c 2668 ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
0d4529c5 2669 map->unlock(map->lock_arg);
fc3ebd78
KG
2670
2671 return ret;
018690d3 2672}
b83a313b 2673EXPORT_SYMBOL_GPL(regmap_update_bits);
31244e39 2674
fd4b7286
KM
2675/**
2676 * regmap_write_bits: Perform a read/modify/write cycle on the register map
2677 *
2678 * @map: Register map to update
2679 * @reg: Register to update
2680 * @mask: Bitmask to change
2681 * @val: New value for bitmask
2682 *
2683 * Returns zero for success, a negative number on error.
2684 */
2685int regmap_write_bits(struct regmap *map, unsigned int reg,
2686 unsigned int mask, unsigned int val)
2687{
2688 int ret;
2689
2690 map->lock(map->lock_arg);
2691 ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
2692 map->unlock(map->lock_arg);
2693
2694 return ret;
2695}
2696EXPORT_SYMBOL_GPL(regmap_write_bits);
2697
915f441b
MB
2698/**
2699 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
2700 * map asynchronously
2701 *
2702 * @map: Register map to update
2703 * @reg: Register to update
2704 * @mask: Bitmask to change
2705 * @val: New value for bitmask
2706 *
2707 * With most buses the read must be done synchronously so this is most
2708 * useful for devices with a cache which do not need to interact with
2709 * the hardware to determine the current register value.
2710 *
2711 * Returns zero for success, a negative number on error.
2712 */
2713int regmap_update_bits_async(struct regmap *map, unsigned int reg,
2714 unsigned int mask, unsigned int val)
2715{
915f441b
MB
2716 int ret;
2717
2718 map->lock(map->lock_arg);
2719
2720 map->async = true;
2721
7ff0589c 2722 ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
915f441b
MB
2723
2724 map->async = false;
2725
2726 map->unlock(map->lock_arg);
2727
2728 return ret;
2729}
2730EXPORT_SYMBOL_GPL(regmap_update_bits_async);
2731
018690d3
MB
2732/**
2733 * regmap_update_bits_check: Perform a read/modify/write cycle on the
2734 * register map and report if updated
2735 *
2736 * @map: Register map to update
2737 * @reg: Register to update
2738 * @mask: Bitmask to change
2739 * @val: New value for bitmask
2740 * @change: Boolean indicating if a write was done
2741 *
2742 * Returns zero for success, a negative number on error.
2743 */
2744int regmap_update_bits_check(struct regmap *map, unsigned int reg,
2745 unsigned int mask, unsigned int val,
2746 bool *change)
2747{
fc3ebd78
KG
2748 int ret;
2749
0d4529c5 2750 map->lock(map->lock_arg);
7ff0589c 2751 ret = _regmap_update_bits(map, reg, mask, val, change, false);
0d4529c5 2752 map->unlock(map->lock_arg);
fc3ebd78 2753 return ret;
018690d3
MB
2754}
2755EXPORT_SYMBOL_GPL(regmap_update_bits_check);
2756
915f441b
MB
2757/**
2758 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
2759 * register map asynchronously and report if
2760 * updated
2761 *
2762 * @map: Register map to update
2763 * @reg: Register to update
2764 * @mask: Bitmask to change
2765 * @val: New value for bitmask
2766 * @change: Boolean indicating if a write was done
2767 *
2768 * With most buses the read must be done synchronously so this is most
2769 * useful for devices with a cache which do not need to interact with
2770 * the hardware to determine the current register value.
2771 *
2772 * Returns zero for success, a negative number on error.
2773 */
2774int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
2775 unsigned int mask, unsigned int val,
2776 bool *change)
2777{
2778 int ret;
2779
2780 map->lock(map->lock_arg);
2781
2782 map->async = true;
2783
7ff0589c 2784 ret = _regmap_update_bits(map, reg, mask, val, change, false);
915f441b
MB
2785
2786 map->async = false;
2787
2788 map->unlock(map->lock_arg);
2789
2790 return ret;
2791}
2792EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
2793
0d509f2b
MB
/*
 * Completion callback invoked by bus drivers when an asynchronous
 * write finishes: returns the async descriptor to the free list,
 * records any error and wakes waiters once the pending list drains.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	/* Only wake once all outstanding async operations are done */
	wake = list_empty(&map->async_list);

	/* Record the failure so regmap_async_complete() can report it */
	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
0d509f2b
MB
2814
2815static int regmap_async_is_done(struct regmap *map)
2816{
2817 unsigned long flags;
2818 int ret;
2819
2820 spin_lock_irqsave(&map->async_lock, flags);
2821 ret = list_empty(&map->async_list);
2822 spin_unlock_irqrestore(&map->async_lock, flags);
2823
2824 return ret;
2825}
2826
/**
 * regmap_async_complete: Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus || !map->bus->async_write)
		return 0;

	trace_regmap_async_complete_start(map);

	wait_event(map->async_waitq, regmap_async_is_done(map));

	/* Collect and reset the accumulated completion status */
	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	trace_regmap_async_complete_done(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_async_complete);
0d509f2b 2858
22f0d90a
MB
/**
 * regmap_register_patch: Register and apply register updates to be applied
 * on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to be applied to the device defaults on startup, such
 * as the updates some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
	    num_regs))
		return 0;

	/* Append to any previously registered patch for later replay */
	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	/* Write straight to hardware, asynchronously where supported */
	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	/* Wait for any asynchronous writes issued above to finish */
	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
2917
eae4b51b 2918/*
a6539c32
MB
2919 * regmap_get_val_bytes(): Report the size of a register value
2920 *
2921 * Report the size of a register value, mainly intended to for use by
2922 * generic infrastructure built on top of regmap.
2923 */
2924int regmap_get_val_bytes(struct regmap *map)
2925{
2926 if (map->format.format_write)
2927 return -EINVAL;
2928
2929 return map->format.val_bytes;
2930}
2931EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2932
668abc72
SK
2933/**
2934 * regmap_get_max_register(): Report the max register value
2935 *
2936 * Report the max register value, mainly intended to for use by
2937 * generic infrastructure built on top of regmap.
2938 */
2939int regmap_get_max_register(struct regmap *map)
2940{
2941 return map->max_register ? map->max_register : -EINVAL;
2942}
2943EXPORT_SYMBOL_GPL(regmap_get_max_register);
2944
a2f776cb
SK
/**
 * regmap_get_reg_stride(): Report the register address stride
 *
 * Report the register address stride, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_reg_stride(struct regmap *map)
{
	return map->reg_stride;
}
EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
2956
13ff50c8
NC
2957int regmap_parse_val(struct regmap *map, const void *buf,
2958 unsigned int *val)
2959{
2960 if (!map->format.parse_val)
2961 return -EINVAL;
2962
2963 *val = map->format.parse_val(buf);
2964
2965 return 0;
2966}
2967EXPORT_SYMBOL_GPL(regmap_parse_val);
2968
31244e39
MB
/* One-time init: set up the regmap debugfs infrastructure early in boot */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);
This page took 0.409082 seconds and 5 git commands to generate.