regmap: Add regmap_get_device
[deliverable/linux.git] / drivers / base / regmap / regmap.c
1/*
2 * Register map access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/device.h>
14#include <linux/slab.h>
15#include <linux/export.h>
16#include <linux/mutex.h>
17#include <linux/err.h>
18#include <linux/rbtree.h>
19#include <linux/sched.h>
20
21#define CREATE_TRACE_POINTS
22#include <trace/events/regmap.h>
23
24#include "internal.h"
25
26/*
27 * Sometimes for failures during very early init the trace
28 * infrastructure isn't available early enough to be used. For this
29 * sort of problem defining LOG_DEVICE will add printks for basic
30 * register I/O on a specific device.
31 */
32#undef LOG_DEVICE
33
34static int _regmap_update_bits(struct regmap *map, unsigned int reg,
35 unsigned int mask, unsigned int val,
36 bool *change);
37
38static int _regmap_bus_reg_read(void *context, unsigned int reg,
39 unsigned int *val);
40static int _regmap_bus_read(void *context, unsigned int reg,
41 unsigned int *val);
42static int _regmap_bus_formatted_write(void *context, unsigned int reg,
43 unsigned int val);
44static int _regmap_bus_reg_write(void *context, unsigned int reg,
45 unsigned int val);
46static int _regmap_bus_raw_write(void *context, unsigned int reg,
47 unsigned int val);
48
49bool regmap_reg_in_ranges(unsigned int reg,
50 const struct regmap_range *ranges,
51 unsigned int nranges)
52{
53 const struct regmap_range *r;
54 int i;
55
56 for (i = 0, r = ranges; i < nranges; i++, r++)
57 if (regmap_reg_in_range(reg, r))
58 return true;
59 return false;
60}
61EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
62
63bool regmap_check_range_table(struct regmap *map, unsigned int reg,
64 const struct regmap_access_table *table)
65{
66 /* Check "no ranges" first */
67 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
68 return false;
69
70 /* In case zero "yes ranges" are supplied, any reg is OK */
71 if (!table->n_yes_ranges)
72 return true;
73
74 return regmap_reg_in_ranges(reg, table->yes_ranges,
75 table->n_yes_ranges);
76}
77EXPORT_SYMBOL_GPL(regmap_check_range_table);
78
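/*
 * Illustrative sketch (not part of the original file): how a driver might
 * describe its writeable registers with an access table, which
 * regmap_writeable() below consults via regmap_check_range_table().
 * The "foo_" names and register numbers are hypothetical.
 *
 *	static const struct regmap_range foo_wr_ranges[] = {
 *		regmap_reg_range(0x00, 0x3f),
 *	};
 *
 *	static const struct regmap_access_table foo_wr_table = {
 *		.yes_ranges = foo_wr_ranges,
 *		.n_yes_ranges = ARRAY_SIZE(foo_wr_ranges),
 *	};
 *
 * Pointing regmap_config.wr_table at &foo_wr_table makes every register
 * outside 0x00..0x3f read-only as far as regmap is concerned.
 */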
79bool regmap_writeable(struct regmap *map, unsigned int reg)
80{
81 if (map->max_register && reg > map->max_register)
82 return false;
83
84 if (map->writeable_reg)
85 return map->writeable_reg(map->dev, reg);
86
87 if (map->wr_table)
88 return regmap_check_range_table(map, reg, map->wr_table);
89
90 return true;
91}
92
93bool regmap_readable(struct regmap *map, unsigned int reg)
94{
95 if (map->max_register && reg > map->max_register)
96 return false;
97
98 if (map->format.format_write)
99 return false;
100
101 if (map->readable_reg)
102 return map->readable_reg(map->dev, reg);
103
104 if (map->rd_table)
105 return regmap_check_range_table(map, reg, map->rd_table);
106
107 return true;
108}
109
110bool regmap_volatile(struct regmap *map, unsigned int reg)
111{
112 if (!regmap_readable(map, reg))
113 return false;
114
115 if (map->volatile_reg)
116 return map->volatile_reg(map->dev, reg);
117
118 if (map->volatile_table)
119 return regmap_check_range_table(map, reg, map->volatile_table);
120
121 if (map->cache_ops)
122 return false;
123 else
124 return true;
125}
126
127bool regmap_precious(struct regmap *map, unsigned int reg)
128{
129 if (!regmap_readable(map, reg))
130 return false;
131
132 if (map->precious_reg)
133 return map->precious_reg(map->dev, reg);
134
135 if (map->precious_table)
136 return regmap_check_range_table(map, reg, map->precious_table);
137
138 return false;
139}
140
141static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
142 size_t num)
143{
144 unsigned int i;
145
146 for (i = 0; i < num; i++)
147 if (!regmap_volatile(map, reg + i))
148 return false;
149
150 return true;
151}
152
153static void regmap_format_2_6_write(struct regmap *map,
154 unsigned int reg, unsigned int val)
155{
156 u8 *out = map->work_buf;
157
158 *out = (reg << 6) | val;
159}
160
161static void regmap_format_4_12_write(struct regmap *map,
162 unsigned int reg, unsigned int val)
163{
164 __be16 *out = map->work_buf;
165 *out = cpu_to_be16((reg << 12) | val);
166}
167
168static void regmap_format_7_9_write(struct regmap *map,
169 unsigned int reg, unsigned int val)
170{
171 __be16 *out = map->work_buf;
172 *out = cpu_to_be16((reg << 9) | val);
173}
174
175static void regmap_format_10_14_write(struct regmap *map,
176 unsigned int reg, unsigned int val)
177{
178 u8 *out = map->work_buf;
179
180 out[2] = val;
181 out[1] = (val >> 8) | (reg << 6);
182 out[0] = reg >> 2;
183}
184
185static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
186{
187 u8 *b = buf;
188
189 b[0] = val << shift;
190}
191
192static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
193{
194 __be16 *b = buf;
195
196 b[0] = cpu_to_be16(val << shift);
197}
198
199static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
200{
201 __le16 *b = buf;
202
203 b[0] = cpu_to_le16(val << shift);
204}
205
206static void regmap_format_16_native(void *buf, unsigned int val,
207 unsigned int shift)
208{
209 *(u16 *)buf = val << shift;
210}
211
212static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
213{
214 u8 *b = buf;
215
216 val <<= shift;
217
218 b[0] = val >> 16;
219 b[1] = val >> 8;
220 b[2] = val;
221}
222
223static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
224{
225 __be32 *b = buf;
226
227 b[0] = cpu_to_be32(val << shift);
228}
229
230static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
231{
232 __le32 *b = buf;
233
234 b[0] = cpu_to_le32(val << shift);
235}
236
237static void regmap_format_32_native(void *buf, unsigned int val,
238 unsigned int shift)
239{
240 *(u32 *)buf = val << shift;
241}
242
243static void regmap_parse_inplace_noop(void *buf)
244{
245}
246
247static unsigned int regmap_parse_8(const void *buf)
248{
249 const u8 *b = buf;
250
251 return b[0];
252}
253
254static unsigned int regmap_parse_16_be(const void *buf)
255{
256 const __be16 *b = buf;
257
258 return be16_to_cpu(b[0]);
259}
260
261static unsigned int regmap_parse_16_le(const void *buf)
262{
263 const __le16 *b = buf;
264
265 return le16_to_cpu(b[0]);
266}
267
268static void regmap_parse_16_be_inplace(void *buf)
269{
270 __be16 *b = buf;
271
272 b[0] = be16_to_cpu(b[0]);
273}
274
275static void regmap_parse_16_le_inplace(void *buf)
276{
277 __le16 *b = buf;
278
279 b[0] = le16_to_cpu(b[0]);
280}
281
282static unsigned int regmap_parse_16_native(const void *buf)
283{
284 return *(u16 *)buf;
285}
286
287static unsigned int regmap_parse_24(const void *buf)
288{
289 const u8 *b = buf;
290 unsigned int ret = b[2];
291 ret |= ((unsigned int)b[1]) << 8;
292 ret |= ((unsigned int)b[0]) << 16;
293
294 return ret;
295}
296
297static unsigned int regmap_parse_32_be(const void *buf)
298{
299 const __be32 *b = buf;
300
301 return be32_to_cpu(b[0]);
302}
303
304static unsigned int regmap_parse_32_le(const void *buf)
305{
306 const __le32 *b = buf;
307
308 return le32_to_cpu(b[0]);
309}
310
311static void regmap_parse_32_be_inplace(void *buf)
312{
313 __be32 *b = buf;
314
315 b[0] = be32_to_cpu(b[0]);
316}
317
318static void regmap_parse_32_le_inplace(void *buf)
319{
320 __le32 *b = buf;
321
322 b[0] = le32_to_cpu(b[0]);
323}
324
325static unsigned int regmap_parse_32_native(const void *buf)
326{
327 return *(u32 *)buf;
328}
329
330static void regmap_lock_mutex(void *__map)
331{
332 struct regmap *map = __map;
333 mutex_lock(&map->mutex);
334}
335
336static void regmap_unlock_mutex(void *__map)
337{
338 struct regmap *map = __map;
339 mutex_unlock(&map->mutex);
340}
341
342static void regmap_lock_spinlock(void *__map)
343__acquires(&map->spinlock)
344{
345 struct regmap *map = __map;
346 unsigned long flags;
347
348 spin_lock_irqsave(&map->spinlock, flags);
349 map->spinlock_flags = flags;
350}
351
352static void regmap_unlock_spinlock(void *__map)
353__releases(&map->spinlock)
354{
355 struct regmap *map = __map;
356 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
357}
358
359static void dev_get_regmap_release(struct device *dev, void *res)
360{
361 /*
362 * We don't actually have anything to do here; the goal here
363 * is not to manage the regmap but to provide a simple way to
364 * get the regmap back given a struct device.
365 */
366}
367
368static bool _regmap_range_add(struct regmap *map,
369 struct regmap_range_node *data)
370{
371 struct rb_root *root = &map->range_tree;
372 struct rb_node **new = &(root->rb_node), *parent = NULL;
373
374 while (*new) {
375 struct regmap_range_node *this =
376 container_of(*new, struct regmap_range_node, node);
377
378 parent = *new;
379 if (data->range_max < this->range_min)
380 new = &((*new)->rb_left);
381 else if (data->range_min > this->range_max)
382 new = &((*new)->rb_right);
383 else
384 return false;
385 }
386
387 rb_link_node(&data->node, parent, new);
388 rb_insert_color(&data->node, root);
389
390 return true;
391}
392
393static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
394 unsigned int reg)
395{
396 struct rb_node *node = map->range_tree.rb_node;
397
398 while (node) {
399 struct regmap_range_node *this =
400 container_of(node, struct regmap_range_node, node);
401
402 if (reg < this->range_min)
403 node = node->rb_left;
404 else if (reg > this->range_max)
405 node = node->rb_right;
406 else
407 return this;
408 }
409
410 return NULL;
411}
412
413static void regmap_range_exit(struct regmap *map)
414{
415 struct rb_node *next;
416 struct regmap_range_node *range_node;
417
418 next = rb_first(&map->range_tree);
419 while (next) {
420 range_node = rb_entry(next, struct regmap_range_node, node);
421 next = rb_next(&range_node->node);
422 rb_erase(&range_node->node, &map->range_tree);
423 kfree(range_node);
424 }
425
426 kfree(map->selector_work_buf);
427}
428
429int regmap_attach_dev(struct device *dev, struct regmap *map,
430 const struct regmap_config *config)
431{
432 struct regmap **m;
433
434 map->dev = dev;
435
436 regmap_debugfs_init(map, config->name);
437
438 /* Add a devres resource for dev_get_regmap() */
439 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
440 if (!m) {
441 regmap_debugfs_exit(map);
442 return -ENOMEM;
443 }
444 *m = map;
445 devres_add(dev, m);
446
447 return 0;
448}
449EXPORT_SYMBOL_GPL(regmap_attach_dev);
450
451/**
452 * regmap_init(): Initialise register map
453 *
454 * @dev: Device that will be interacted with
455 * @bus: Bus-specific callbacks to use with device
456 * @bus_context: Data passed to bus-specific callbacks
457 * @config: Configuration for register map
458 *
459 * The return value will be an ERR_PTR() on error or a valid pointer to
460 * a struct regmap. This function should generally not be called
461 * directly, it should be called by bus-specific init functions.
462 */
463struct regmap *regmap_init(struct device *dev,
464 const struct regmap_bus *bus,
465 void *bus_context,
466 const struct regmap_config *config)
467{
468 struct regmap *map;
469 int ret = -EINVAL;
470 enum regmap_endian reg_endian, val_endian;
471 int i, j;
472
473 if (!config)
474 goto err;
475
476 map = kzalloc(sizeof(*map), GFP_KERNEL);
477 if (map == NULL) {
478 ret = -ENOMEM;
479 goto err;
480 }
481
482 if (config->lock && config->unlock) {
483 map->lock = config->lock;
484 map->unlock = config->unlock;
485 map->lock_arg = config->lock_arg;
486 } else {
487 if ((bus && bus->fast_io) ||
488 config->fast_io) {
489 spin_lock_init(&map->spinlock);
490 map->lock = regmap_lock_spinlock;
491 map->unlock = regmap_unlock_spinlock;
492 } else {
493 mutex_init(&map->mutex);
494 map->lock = regmap_lock_mutex;
495 map->unlock = regmap_unlock_mutex;
496 }
497 map->lock_arg = map;
498 }
499 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
500 map->format.pad_bytes = config->pad_bits / 8;
501 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
502 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
503 config->val_bits + config->pad_bits, 8);
504 map->reg_shift = config->pad_bits % 8;
505 if (config->reg_stride)
506 map->reg_stride = config->reg_stride;
507 else
508 map->reg_stride = 1;
509 map->use_single_rw = config->use_single_rw;
510 map->can_multi_write = config->can_multi_write;
511 map->dev = dev;
512 map->bus = bus;
513 map->bus_context = bus_context;
514 map->max_register = config->max_register;
515 map->wr_table = config->wr_table;
516 map->rd_table = config->rd_table;
517 map->volatile_table = config->volatile_table;
518 map->precious_table = config->precious_table;
519 map->writeable_reg = config->writeable_reg;
520 map->readable_reg = config->readable_reg;
521 map->volatile_reg = config->volatile_reg;
522 map->precious_reg = config->precious_reg;
523 map->cache_type = config->cache_type;
524 map->name = config->name;
525
526 spin_lock_init(&map->async_lock);
527 INIT_LIST_HEAD(&map->async_list);
528 INIT_LIST_HEAD(&map->async_free);
529 init_waitqueue_head(&map->async_waitq);
530
531 if (config->read_flag_mask || config->write_flag_mask) {
532 map->read_flag_mask = config->read_flag_mask;
533 map->write_flag_mask = config->write_flag_mask;
534 } else if (bus) {
535 map->read_flag_mask = bus->read_flag_mask;
536 }
537
538 if (!bus) {
539 map->reg_read = config->reg_read;
540 map->reg_write = config->reg_write;
541
542 map->defer_caching = false;
543 goto skip_format_initialization;
544 } else if (!bus->read || !bus->write) {
545 map->reg_read = _regmap_bus_reg_read;
546 map->reg_write = _regmap_bus_reg_write;
547
548 map->defer_caching = false;
549 goto skip_format_initialization;
550 } else {
551 map->reg_read = _regmap_bus_read;
552 }
553
554 reg_endian = config->reg_format_endian;
555 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
556 reg_endian = bus->reg_format_endian_default;
557 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
558 reg_endian = REGMAP_ENDIAN_BIG;
559
560 val_endian = config->val_format_endian;
561 if (val_endian == REGMAP_ENDIAN_DEFAULT)
562 val_endian = bus->val_format_endian_default;
563 if (val_endian == REGMAP_ENDIAN_DEFAULT)
564 val_endian = REGMAP_ENDIAN_BIG;
565
566 switch (config->reg_bits + map->reg_shift) {
567 case 2:
568 switch (config->val_bits) {
569 case 6:
570 map->format.format_write = regmap_format_2_6_write;
571 break;
572 default:
573 goto err_map;
574 }
575 break;
576
577 case 4:
578 switch (config->val_bits) {
579 case 12:
580 map->format.format_write = regmap_format_4_12_write;
581 break;
582 default:
583 goto err_map;
584 }
585 break;
586
587 case 7:
588 switch (config->val_bits) {
589 case 9:
590 map->format.format_write = regmap_format_7_9_write;
591 break;
592 default:
593 goto err_map;
594 }
595 break;
596
597 case 10:
598 switch (config->val_bits) {
599 case 14:
600 map->format.format_write = regmap_format_10_14_write;
601 break;
602 default:
603 goto err_map;
604 }
605 break;
606
607 case 8:
608 map->format.format_reg = regmap_format_8;
609 break;
610
611 case 16:
612 switch (reg_endian) {
613 case REGMAP_ENDIAN_BIG:
614 map->format.format_reg = regmap_format_16_be;
615 break;
616 case REGMAP_ENDIAN_NATIVE:
617 map->format.format_reg = regmap_format_16_native;
618 break;
619 default:
620 goto err_map;
621 }
622 break;
623
624 case 24:
625 if (reg_endian != REGMAP_ENDIAN_BIG)
626 goto err_map;
627 map->format.format_reg = regmap_format_24;
628 break;
629
630 case 32:
631 switch (reg_endian) {
632 case REGMAP_ENDIAN_BIG:
633 map->format.format_reg = regmap_format_32_be;
634 break;
635 case REGMAP_ENDIAN_NATIVE:
636 map->format.format_reg = regmap_format_32_native;
637 break;
638 default:
639 goto err_map;
640 }
641 break;
642
643 default:
644 goto err_map;
645 }
646
647 if (val_endian == REGMAP_ENDIAN_NATIVE)
648 map->format.parse_inplace = regmap_parse_inplace_noop;
649
650 switch (config->val_bits) {
651 case 8:
652 map->format.format_val = regmap_format_8;
653 map->format.parse_val = regmap_parse_8;
654 map->format.parse_inplace = regmap_parse_inplace_noop;
655 break;
656 case 16:
657 switch (val_endian) {
658 case REGMAP_ENDIAN_BIG:
659 map->format.format_val = regmap_format_16_be;
660 map->format.parse_val = regmap_parse_16_be;
661 map->format.parse_inplace = regmap_parse_16_be_inplace;
662 break;
663 case REGMAP_ENDIAN_LITTLE:
664 map->format.format_val = regmap_format_16_le;
665 map->format.parse_val = regmap_parse_16_le;
666 map->format.parse_inplace = regmap_parse_16_le_inplace;
667 break;
668 case REGMAP_ENDIAN_NATIVE:
669 map->format.format_val = regmap_format_16_native;
670 map->format.parse_val = regmap_parse_16_native;
671 break;
672 default:
673 goto err_map;
674 }
675 break;
676 case 24:
677 if (val_endian != REGMAP_ENDIAN_BIG)
678 goto err_map;
679 map->format.format_val = regmap_format_24;
680 map->format.parse_val = regmap_parse_24;
681 break;
682 case 32:
683 switch (val_endian) {
684 case REGMAP_ENDIAN_BIG:
685 map->format.format_val = regmap_format_32_be;
686 map->format.parse_val = regmap_parse_32_be;
687 map->format.parse_inplace = regmap_parse_32_be_inplace;
688 break;
689 case REGMAP_ENDIAN_LITTLE:
690 map->format.format_val = regmap_format_32_le;
691 map->format.parse_val = regmap_parse_32_le;
692 map->format.parse_inplace = regmap_parse_32_le_inplace;
693 break;
694 case REGMAP_ENDIAN_NATIVE:
695 map->format.format_val = regmap_format_32_native;
696 map->format.parse_val = regmap_parse_32_native;
697 break;
698 default:
699 goto err_map;
700 }
701 break;
702 }
703
704 if (map->format.format_write) {
705 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
706 (val_endian != REGMAP_ENDIAN_BIG))
707 goto err_map;
708 map->use_single_rw = true;
709 }
710
711 if (!map->format.format_write &&
712 !(map->format.format_reg && map->format.format_val))
713 goto err_map;
714
715 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
716 if (map->work_buf == NULL) {
717 ret = -ENOMEM;
718 goto err_map;
719 }
720
721 if (map->format.format_write) {
722 map->defer_caching = false;
723 map->reg_write = _regmap_bus_formatted_write;
724 } else if (map->format.format_val) {
725 map->defer_caching = true;
726 map->reg_write = _regmap_bus_raw_write;
727 }
728
729skip_format_initialization:
730
731 map->range_tree = RB_ROOT;
732 for (i = 0; i < config->num_ranges; i++) {
733 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
734 struct regmap_range_node *new;
735
736 /* Sanity check */
737 if (range_cfg->range_max < range_cfg->range_min) {
738 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
739 range_cfg->range_max, range_cfg->range_min);
740 goto err_range;
741 }
742
743 if (range_cfg->range_max > map->max_register) {
744 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
745 range_cfg->range_max, map->max_register);
746 goto err_range;
747 }
748
749 if (range_cfg->selector_reg > map->max_register) {
750 dev_err(map->dev,
751 "Invalid range %d: selector out of map\n", i);
752 goto err_range;
753 }
754
755 if (range_cfg->window_len == 0) {
756 dev_err(map->dev, "Invalid range %d: window_len 0\n",
757 i);
758 goto err_range;
759 }
760
761 /* Make sure that this register range has no selector
762 or data window within its boundary */
763 for (j = 0; j < config->num_ranges; j++) {
764 unsigned sel_reg = config->ranges[j].selector_reg;
765 unsigned win_min = config->ranges[j].window_start;
766 unsigned win_max = win_min +
767 config->ranges[j].window_len - 1;
768
769 /* Allow data window inside its own virtual range */
770 if (j == i)
771 continue;
772
773 if (range_cfg->range_min <= sel_reg &&
774 sel_reg <= range_cfg->range_max) {
775 dev_err(map->dev,
776 "Range %d: selector for %d in window\n",
777 i, j);
778 goto err_range;
779 }
780
781 if (!(win_max < range_cfg->range_min ||
782 win_min > range_cfg->range_max)) {
783 dev_err(map->dev,
784 "Range %d: window for %d in window\n",
785 i, j);
786 goto err_range;
787 }
788 }
789
790 new = kzalloc(sizeof(*new), GFP_KERNEL);
791 if (new == NULL) {
792 ret = -ENOMEM;
793 goto err_range;
794 }
795
796 new->map = map;
797 new->name = range_cfg->name;
798 new->range_min = range_cfg->range_min;
799 new->range_max = range_cfg->range_max;
800 new->selector_reg = range_cfg->selector_reg;
801 new->selector_mask = range_cfg->selector_mask;
802 new->selector_shift = range_cfg->selector_shift;
803 new->window_start = range_cfg->window_start;
804 new->window_len = range_cfg->window_len;
805
806 if (!_regmap_range_add(map, new)) {
807 dev_err(map->dev, "Failed to add range %d\n", i);
808 kfree(new);
809 goto err_range;
810 }
811
812 if (map->selector_work_buf == NULL) {
813 map->selector_work_buf =
814 kzalloc(map->format.buf_size, GFP_KERNEL);
815 if (map->selector_work_buf == NULL) {
816 ret = -ENOMEM;
817 goto err_range;
818 }
819 }
820 }
821
822 ret = regcache_init(map, config);
823 if (ret != 0)
824 goto err_range;
825
826 if (dev) {
827 ret = regmap_attach_dev(dev, map, config);
828 if (ret != 0)
829 goto err_regcache;
830 }
831
832 return map;
833
834err_regcache:
835 regcache_exit(map);
836err_range:
837 regmap_range_exit(map);
838 kfree(map->work_buf);
839err_map:
840 kfree(map);
841err:
842 return ERR_PTR(ret);
843}
844EXPORT_SYMBOL_GPL(regmap_init);
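/*
 * Illustrative sketch (not part of the original file): regmap_init() is
 * normally reached through a bus-specific wrapper that supplies the
 * struct regmap_bus, broadly along the lines of what regmap_init_i2c()
 * does. The "foo_" names below are hypothetical.
 *
 *	static const struct regmap_bus foo_regmap_bus = {
 *		.write = foo_bus_write,
 *		.read = foo_bus_read,
 *	};
 *
 *	struct regmap *regmap_init_foo(struct foo_device *foo,
 *				       const struct regmap_config *config)
 *	{
 *		return regmap_init(&foo->dev, &foo_regmap_bus, foo, config);
 *	}
 */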
845
846static void devm_regmap_release(struct device *dev, void *res)
847{
848 regmap_exit(*(struct regmap **)res);
849}
850
851/**
852 * devm_regmap_init(): Initialise managed register map
853 *
854 * @dev: Device that will be interacted with
855 * @bus: Bus-specific callbacks to use with device
856 * @bus_context: Data passed to bus-specific callbacks
857 * @config: Configuration for register map
858 *
859 * The return value will be an ERR_PTR() on error or a valid pointer
860 * to a struct regmap. This function should generally not be called
861 * directly, it should be called by bus-specific init functions. The
862 * map will be automatically freed by the device management code.
863 */
864struct regmap *devm_regmap_init(struct device *dev,
865 const struct regmap_bus *bus,
866 void *bus_context,
867 const struct regmap_config *config)
868{
869 struct regmap **ptr, *regmap;
870
871 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
872 if (!ptr)
873 return ERR_PTR(-ENOMEM);
874
875 regmap = regmap_init(dev, bus, bus_context, config);
876 if (!IS_ERR(regmap)) {
877 *ptr = regmap;
878 devres_add(dev, ptr);
879 } else {
880 devres_free(ptr);
881 }
882
883 return regmap;
884}
885EXPORT_SYMBOL_GPL(devm_regmap_init);
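/*
 * Illustrative sketch (not part of the original file): typical use of the
 * managed initialisation from a driver probe() via a bus-specific helper
 * such as devm_regmap_init_i2c(). The "foo_" names are hypothetical.
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 8,
 *		.max_register = 0x7f,
 *		.cache_type = REGCACHE_RBTREE,
 *	};
 *
 *	static int foo_i2c_probe(struct i2c_client *i2c,
 *				 const struct i2c_device_id *id)
 *	{
 *		struct regmap *map;
 *
 *		map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *		return 0;
 *	}
 */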
886
887static void regmap_field_init(struct regmap_field *rm_field,
888 struct regmap *regmap, struct reg_field reg_field)
889{
890 int field_bits = reg_field.msb - reg_field.lsb + 1;
891 rm_field->regmap = regmap;
892 rm_field->reg = reg_field.reg;
893 rm_field->shift = reg_field.lsb;
894 rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
895 rm_field->id_size = reg_field.id_size;
896 rm_field->id_offset = reg_field.id_offset;
897}
898
899/**
900 * devm_regmap_field_alloc(): Allocate and initialise a register field
901 * in a register map.
902 *
903 * @dev: Device that will be interacted with
904 * @regmap: regmap bank in which this register field is located.
905 * @reg_field: Register field with in the bank.
906 *
907 * The return value will be an ERR_PTR() on error or a valid pointer
908 * to a struct regmap_field. The regmap_field will be automatically freed
909 * by the device management code.
910 */
911struct regmap_field *devm_regmap_field_alloc(struct device *dev,
912 struct regmap *regmap, struct reg_field reg_field)
913{
914 struct regmap_field *rm_field = devm_kzalloc(dev,
915 sizeof(*rm_field), GFP_KERNEL);
916 if (!rm_field)
917 return ERR_PTR(-ENOMEM);
918
919 regmap_field_init(rm_field, regmap, reg_field);
920
921 return rm_field;
922
923}
924EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
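/*
 * Illustrative sketch (not part of the original file): describing a
 * bit-field with the REG_FIELD() helper and writing it through the
 * allocated regmap_field. The "foo_" names and register layout are
 * hypothetical.
 *
 *	static const struct reg_field foo_enable_field =
 *		REG_FIELD(0x10, 3, 5);		// bits [5:3] of register 0x10
 *
 *	struct regmap_field *f;
 *
 *	f = devm_regmap_field_alloc(dev, map, foo_enable_field);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *
 *	regmap_field_write(f, 0x4);		// masking/shifting handled by regmap
 */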
925
926/**
927 * devm_regmap_field_free(): Free register field allocated using
928 * devm_regmap_field_alloc. Usually drivers need not call this function,
929 * as the memory allocated via devm will be freed as per the device-driver
930 * life-cycle.
931 *
932 * @dev: Device that will be interacted with
933 * @field: regmap field which should be freed.
934 */
935void devm_regmap_field_free(struct device *dev,
936 struct regmap_field *field)
937{
938 devm_kfree(dev, field);
939}
940EXPORT_SYMBOL_GPL(devm_regmap_field_free);
941
942/**
943 * regmap_field_alloc(): Allocate and initialise a register field
944 * in a register map.
945 *
946 * @regmap: regmap bank in which this register field is located.
947 * @reg_field: Register field within the bank.
948 *
949 * The return value will be an ERR_PTR() on error or a valid pointer
950 * to a struct regmap_field. The regmap_field should be freed by the
951 * user once they are finished working with it, using regmap_field_free().
952 */
953struct regmap_field *regmap_field_alloc(struct regmap *regmap,
954 struct reg_field reg_field)
955{
956 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
957
958 if (!rm_field)
959 return ERR_PTR(-ENOMEM);
960
961 regmap_field_init(rm_field, regmap, reg_field);
962
963 return rm_field;
964}
965EXPORT_SYMBOL_GPL(regmap_field_alloc);
966
967/**
968 * regmap_field_free(): Free register field allocated using regmap_field_alloc
969 *
970 * @field: regmap field which should be freed.
971 */
972void regmap_field_free(struct regmap_field *field)
973{
974 kfree(field);
975}
976EXPORT_SYMBOL_GPL(regmap_field_free);
977
978/**
979 * regmap_reinit_cache(): Reinitialise the current register cache
980 *
981 * @map: Register map to operate on.
982 * @config: New configuration. Only the cache data will be used.
983 *
984 * Discard any existing register cache for the map and initialize a
985 * new cache. This can be used to restore the cache to defaults or to
986 * update the cache configuration to reflect runtime discovery of the
987 * hardware.
988 *
989 * No explicit locking is done here, the user needs to ensure that
990 * this function will not race with other calls to regmap.
991 */
992int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
993{
994 regcache_exit(map);
995 regmap_debugfs_exit(map);
996
997 map->max_register = config->max_register;
998 map->writeable_reg = config->writeable_reg;
999 map->readable_reg = config->readable_reg;
1000 map->volatile_reg = config->volatile_reg;
1001 map->precious_reg = config->precious_reg;
1002 map->cache_type = config->cache_type;
1003
1004 regmap_debugfs_init(map, config->name);
1005
1006 map->cache_bypass = false;
1007 map->cache_only = false;
1008
1009 return regcache_init(map, config);
1010}
1011EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1012
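/*
 * Illustrative sketch (not part of the original file): a driver that only
 * learns the real register layout at runtime (e.g. after reading a chip
 * revision) might rebuild the cache with regmap_reinit_cache(). The
 * "foo_"/"FOO_" names are hypothetical.
 *
 *	ret = regmap_read(map, FOO_REG_REV, &rev);
 *	if (ret)
 *		return ret;
 *
 *	config = (rev >= 2) ? &foo_v2_regmap_config : &foo_v1_regmap_config;
 *	ret = regmap_reinit_cache(map, config);
 *	if (ret)
 *		return ret;
 */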
1013/**
1014 * regmap_exit(): Free a previously allocated register map
1015 */
1016void regmap_exit(struct regmap *map)
1017{
1018 struct regmap_async *async;
1019
1020 regcache_exit(map);
1021 regmap_debugfs_exit(map);
1022 regmap_range_exit(map);
1023 if (map->bus && map->bus->free_context)
1024 map->bus->free_context(map->bus_context);
1025 kfree(map->work_buf);
1026 while (!list_empty(&map->async_free)) {
1027 async = list_first_entry_or_null(&map->async_free,
1028 struct regmap_async,
1029 list);
1030 list_del(&async->list);
1031 kfree(async->work_buf);
1032 kfree(async);
1033 }
1034 kfree(map);
1035}
1036EXPORT_SYMBOL_GPL(regmap_exit);
1037
1038static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1039{
1040 struct regmap **r = res;
1041 if (!r || !*r) {
1042 WARN_ON(!r || !*r);
1043 return 0;
1044 }
1045
1046 /* If the user didn't specify a name match any */
1047 if (data)
1048 return (*r)->name == data;
1049 else
1050 return 1;
1051}
1052
1053/**
1054 * dev_get_regmap(): Obtain the regmap (if any) for a device
1055 *
1056 * @dev: Device to retrieve the map for
1057 * @name: Optional name for the register map, usually NULL.
1058 *
1059 * Returns the regmap for the device if one is present, or NULL. If
1060 * name is specified then it must match the name specified when
1061 * registering the device, if it is NULL then the first regmap found
1062 * will be used. Devices with multiple register maps are very rare,
1063 * generic code should normally not need to specify a name.
1064 */
1065struct regmap *dev_get_regmap(struct device *dev, const char *name)
1066{
1067 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1068 dev_get_regmap_match, (void *)name);
1069
1070 if (!r)
1071 return NULL;
1072 return *r;
1073}
1074EXPORT_SYMBOL_GPL(dev_get_regmap);
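/*
 * Illustrative sketch (not part of the original file): a child driver of
 * an MFD typically fetches its parent's regmap like this (hypothetical
 * "foo_" names):
 *
 *	static int foo_child_probe(struct platform_device *pdev)
 *	{
 *		struct regmap *map = dev_get_regmap(pdev->dev.parent, NULL);
 *
 *		if (!map)
 *			return -ENODEV;
 *		return 0;
 *	}
 */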
1075
1076/**
1077 * regmap_get_device(): Obtain the device from a regmap
1078 *
1079 * @map: Register map to operate on.
1080 *
1081 * Returns the underlying device that the regmap has been created for.
1082 */
1083struct device *regmap_get_device(struct regmap *map)
1084{
1085 return map->dev;
1086}
1087
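/*
 * Illustrative sketch (not part of the original file): generic code that
 * is only handed a struct regmap can still log against the right device:
 *
 *	struct device *dev = regmap_get_device(map);
 *
 *	dev_warn(dev, "unexpected interrupt status %#x\n", status);
 *
 * ("status" is a hypothetical local variable.)
 */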
1088static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1089 struct regmap_range_node *range,
1090 unsigned int val_num)
1091{
1092 void *orig_work_buf;
1093 unsigned int win_offset;
1094 unsigned int win_page;
1095 bool page_chg;
1096 int ret;
1097
1098 win_offset = (*reg - range->range_min) % range->window_len;
1099 win_page = (*reg - range->range_min) / range->window_len;
1100
1101 if (val_num > 1) {
1102 /* Bulk write shouldn't cross range boundary */
1103 if (*reg + val_num - 1 > range->range_max)
1104 return -EINVAL;
1105
1106 /* ... or single page boundary */
1107 if (val_num > range->window_len - win_offset)
1108 return -EINVAL;
1109 }
1110
1111 /* It is possible to have the selector register inside the data window.
1112 In that case, the selector register is located on every page and
1113 needs no page switching when accessed alone. */
1114 if (val_num > 1 ||
1115 range->window_start + win_offset != range->selector_reg) {
1116 /* Use separate work_buf during page switching */
1117 orig_work_buf = map->work_buf;
1118 map->work_buf = map->selector_work_buf;
1119
1120 ret = _regmap_update_bits(map, range->selector_reg,
1121 range->selector_mask,
1122 win_page << range->selector_shift,
1123 &page_chg);
1124
1125 map->work_buf = orig_work_buf;
1126
1127 if (ret != 0)
1128 return ret;
1129 }
1130
1131 *reg = range->window_start + win_offset;
1132
1133 return 0;
1134}
1135
1136int _regmap_raw_write(struct regmap *map, unsigned int reg,
1137 const void *val, size_t val_len)
1138{
1139 struct regmap_range_node *range;
1140 unsigned long flags;
1141 u8 *u8 = map->work_buf;
1142 void *work_val = map->work_buf + map->format.reg_bytes +
1143 map->format.pad_bytes;
1144 void *buf;
1145 int ret = -ENOTSUPP;
1146 size_t len;
1147 int i;
1148
1149 WARN_ON(!map->bus);
1150
1151 /* Check for unwritable registers before we start */
1152 if (map->writeable_reg)
1153 for (i = 0; i < val_len / map->format.val_bytes; i++)
1154 if (!map->writeable_reg(map->dev,
1155 reg + (i * map->reg_stride)))
1156 return -EINVAL;
1157
1158 if (!map->cache_bypass && map->format.parse_val) {
1159 unsigned int ival;
1160 int val_bytes = map->format.val_bytes;
1161 for (i = 0; i < val_len / val_bytes; i++) {
1162 ival = map->format.parse_val(val + (i * val_bytes));
1163 ret = regcache_write(map, reg + (i * map->reg_stride),
1164 ival);
1165 if (ret) {
1166 dev_err(map->dev,
1167 "Error in caching of register: %x ret: %d\n",
1168 reg + i, ret);
1169 return ret;
1170 }
1171 }
1172 if (map->cache_only) {
1173 map->cache_dirty = true;
1174 return 0;
1175 }
1176 }
1177
1178 range = _regmap_range_lookup(map, reg);
1179 if (range) {
1180 int val_num = val_len / map->format.val_bytes;
1181 int win_offset = (reg - range->range_min) % range->window_len;
1182 int win_residue = range->window_len - win_offset;
1183
1184 /* If the write goes beyond the end of the window split it */
1185 while (val_num > win_residue) {
1186 dev_dbg(map->dev, "Writing window %d/%zu\n",
1187 win_residue, val_len / map->format.val_bytes);
1188 ret = _regmap_raw_write(map, reg, val, win_residue *
1189 map->format.val_bytes);
1190 if (ret != 0)
1191 return ret;
1192
1193 reg += win_residue;
1194 val_num -= win_residue;
1195 val += win_residue * map->format.val_bytes;
1196 val_len -= win_residue * map->format.val_bytes;
1197
1198 win_offset = (reg - range->range_min) %
1199 range->window_len;
1200 win_residue = range->window_len - win_offset;
1201 }
1202
1203 ret = _regmap_select_page(map, &reg, range, val_num);
1204 if (ret != 0)
1205 return ret;
1206 }
1207
1208 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1209
1210 u8[0] |= map->write_flag_mask;
1211
1212 /*
1213 * Essentially all I/O mechanisms will be faster with a single
1214 * buffer to write. Since register syncs often generate raw
1215 * writes of single registers, optimise that case.
1216 */
1217 if (val != work_val && val_len == map->format.val_bytes) {
1218 memcpy(work_val, val, map->format.val_bytes);
1219 val = work_val;
1220 }
1221
1222 if (map->async && map->bus->async_write) {
1223 struct regmap_async *async;
1224
1225 trace_regmap_async_write_start(map->dev, reg, val_len);
1226
1227 spin_lock_irqsave(&map->async_lock, flags);
1228 async = list_first_entry_or_null(&map->async_free,
1229 struct regmap_async,
1230 list);
1231 if (async)
1232 list_del(&async->list);
1233 spin_unlock_irqrestore(&map->async_lock, flags);
1234
1235 if (!async) {
1236 async = map->bus->async_alloc();
1237 if (!async)
1238 return -ENOMEM;
1239
1240 async->work_buf = kzalloc(map->format.buf_size,
1241 GFP_KERNEL | GFP_DMA);
1242 if (!async->work_buf) {
1243 kfree(async);
1244 return -ENOMEM;
1245 }
1246 }
1247
1248 async->map = map;
1249
1250 /* If the caller supplied the value we can use it safely. */
1251 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1252 map->format.reg_bytes + map->format.val_bytes);
1253
1254 spin_lock_irqsave(&map->async_lock, flags);
1255 list_add_tail(&async->list, &map->async_list);
1256 spin_unlock_irqrestore(&map->async_lock, flags);
1257
1258 if (val != work_val)
1259 ret = map->bus->async_write(map->bus_context,
1260 async->work_buf,
1261 map->format.reg_bytes +
1262 map->format.pad_bytes,
1263 val, val_len, async);
1264 else
1265 ret = map->bus->async_write(map->bus_context,
1266 async->work_buf,
1267 map->format.reg_bytes +
1268 map->format.pad_bytes +
1269 val_len, NULL, 0, async);
1270
1271 if (ret != 0) {
1272 dev_err(map->dev, "Failed to schedule write: %d\n",
1273 ret);
1274
1275 spin_lock_irqsave(&map->async_lock, flags);
1276 list_move(&async->list, &map->async_free);
1277 spin_unlock_irqrestore(&map->async_lock, flags);
1278 }
1279
1280 return ret;
1281 }
1282
1283 trace_regmap_hw_write_start(map->dev, reg,
1284 val_len / map->format.val_bytes);
1285
1286 /* If we're doing a single register write we can probably just
1287 * send the work_buf directly, otherwise try to do a gather
1288 * write.
1289 */
1290 if (val == work_val)
1291 ret = map->bus->write(map->bus_context, map->work_buf,
1292 map->format.reg_bytes +
1293 map->format.pad_bytes +
1294 val_len);
1295 else if (map->bus->gather_write)
1296 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1297 map->format.reg_bytes +
1298 map->format.pad_bytes,
1299 val, val_len);
1300
1301 /* If that didn't work fall back on linearising by hand. */
1302 if (ret == -ENOTSUPP) {
1303 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1304 buf = kzalloc(len, GFP_KERNEL);
1305 if (!buf)
1306 return -ENOMEM;
1307
1308 memcpy(buf, map->work_buf, map->format.reg_bytes);
1309 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1310 val, val_len);
1311 ret = map->bus->write(map->bus_context, buf, len);
1312
1313 kfree(buf);
1314 }
1315
1316 trace_regmap_hw_write_done(map->dev, reg,
1317 val_len / map->format.val_bytes);
1318
1319 return ret;
1320}
1321
1322/**
1323 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1324 *
1325 * @map: Map to check.
1326 */
1327bool regmap_can_raw_write(struct regmap *map)
1328{
1329 return map->bus && map->format.format_val && map->format.format_reg;
1330}
1331EXPORT_SYMBOL_GPL(regmap_can_raw_write);
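/*
 * Illustrative sketch (not part of the original file): callers that want
 * to stream raw data can probe for support first:
 *
 *	if (regmap_can_raw_write(map))
 *		ret = regmap_raw_write(map, FOO_FIFO, buf, len);
 *	else
 *		ret = -EOPNOTSUPP;
 *
 * (FOO_FIFO, buf and len are hypothetical.)
 */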
1332
1333static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1334 unsigned int val)
1335{
1336 int ret;
1337 struct regmap_range_node *range;
1338 struct regmap *map = context;
1339
1340 WARN_ON(!map->bus || !map->format.format_write);
1341
1342 range = _regmap_range_lookup(map, reg);
1343 if (range) {
1344 ret = _regmap_select_page(map, &reg, range, 1);
1345 if (ret != 0)
1346 return ret;
1347 }
1348
1349 map->format.format_write(map, reg, val);
1350
1351 trace_regmap_hw_write_start(map->dev, reg, 1);
1352
1353 ret = map->bus->write(map->bus_context, map->work_buf,
1354 map->format.buf_size);
1355
1356 trace_regmap_hw_write_done(map->dev, reg, 1);
1357
1358 return ret;
1359}
1360
1361static int _regmap_bus_reg_write(void *context, unsigned int reg,
1362 unsigned int val)
1363{
1364 struct regmap *map = context;
1365
1366 return map->bus->reg_write(map->bus_context, reg, val);
1367}
1368
1369static int _regmap_bus_raw_write(void *context, unsigned int reg,
1370 unsigned int val)
1371{
1372 struct regmap *map = context;
1373
1374 WARN_ON(!map->bus || !map->format.format_val);
1375
1376 map->format.format_val(map->work_buf + map->format.reg_bytes
1377 + map->format.pad_bytes, val, 0);
1378 return _regmap_raw_write(map, reg,
1379 map->work_buf +
1380 map->format.reg_bytes +
1381 map->format.pad_bytes,
1382 map->format.val_bytes);
1383}
1384
1385static inline void *_regmap_map_get_context(struct regmap *map)
1386{
1387 return (map->bus) ? map : map->bus_context;
1388}
1389
1390int _regmap_write(struct regmap *map, unsigned int reg,
1391 unsigned int val)
b83a313b 1392{
1393 int ret;
1394 void *context = _regmap_map_get_context(map);
1395
1396 if (!regmap_writeable(map, reg))
1397 return -EIO;
1398
1399 if (!map->cache_bypass && !map->defer_caching) {
1400 ret = regcache_write(map, reg, val);
1401 if (ret != 0)
1402 return ret;
1403 if (map->cache_only) {
1404 map->cache_dirty = true;
5d1729e7 1405 return 0;
1406 }
1407 }
1408
1409#ifdef LOG_DEVICE
1410 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1411 dev_info(map->dev, "%x <= %x\n", reg, val);
1412#endif
1413
1414 trace_regmap_reg_write(map->dev, reg, val);
1415
1416 return map->reg_write(context, reg, val);
1417}
1418
1419/**
1420 * regmap_write(): Write a value to a single register
1421 *
1422 * @map: Register map to write to
1423 * @reg: Register to write to
1424 * @val: Value to be written
1425 *
1426 * A value of zero will be returned on success, a negative errno will
1427 * be returned in error cases.
1428 */
1429int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1430{
1431 int ret;
1432
1433 if (reg % map->reg_stride)
1434 return -EINVAL;
1435
1436 map->lock(map->lock_arg);
1437
1438 ret = _regmap_write(map, reg, val);
1439
1440 map->unlock(map->lock_arg);
1441
1442 return ret;
1443}
1444EXPORT_SYMBOL_GPL(regmap_write);
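/*
 * Illustrative sketch (not part of the original file): the common
 * single-register pattern built on regmap_write()/regmap_read()
 * (hypothetical "FOO_" register names):
 *
 *	ret = regmap_write(map, FOO_REG_CTRL, FOO_CTRL_ENABLE);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_read(map, FOO_REG_STATUS, &val);
 *	if (ret)
 *		return ret;
 */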
1445
1446/**
1447 * regmap_write_async(): Write a value to a single register asynchronously
1448 *
1449 * @map: Register map to write to
1450 * @reg: Register to write to
1451 * @val: Value to be written
1452 *
1453 * A value of zero will be returned on success, a negative errno will
1454 * be returned in error cases.
1455 */
1456int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1457{
1458 int ret;
1459
1460 if (reg % map->reg_stride)
1461 return -EINVAL;
1462
1463 map->lock(map->lock_arg);
1464
1465 map->async = true;
1466
1467 ret = _regmap_write(map, reg, val);
1468
1469 map->async = false;
1470
1471 map->unlock(map->lock_arg);
1472
1473 return ret;
1474}
1475EXPORT_SYMBOL_GPL(regmap_write_async);
1476
1477/**
1478 * regmap_raw_write(): Write raw values to one or more registers
1479 *
1480 * @map: Register map to write to
1481 * @reg: Initial register to write to
1482 * @val: Block of data to be written, laid out for direct transmission to the
1483 * device
1484 * @val_len: Length of data pointed to by val.
1485 *
1486 * This function is intended to be used for things like firmware
1487 * download where a large block of data needs to be transferred to the
1488 * device. No formatting will be done on the data provided.
1489 *
1490 * A value of zero will be returned on success, a negative errno will
1491 * be returned in error cases.
1492 */
1493int regmap_raw_write(struct regmap *map, unsigned int reg,
1494 const void *val, size_t val_len)
1495{
1496 int ret;
1497
1498 if (!regmap_can_raw_write(map))
1499 return -EINVAL;
1500 if (val_len % map->format.val_bytes)
1501 return -EINVAL;
1502
1503 map->lock(map->lock_arg);
1504
1505 ret = _regmap_raw_write(map, reg, val, val_len);
1506
1507 map->unlock(map->lock_arg);
1508
1509 return ret;
1510}
1511EXPORT_SYMBOL_GPL(regmap_raw_write);
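/*
 * Illustrative sketch (not part of the original file): pushing a firmware
 * blob that is already laid out in device byte order; val_len must be a
 * multiple of the value size. The "foo/FOO_" names are hypothetical.
 *
 *	const struct firmware *fw;
 *
 *	ret = request_firmware(&fw, "foo/patch.bin", dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_raw_write(map, FOO_REG_DSP_BASE, fw->data, fw->size);
 *	release_firmware(fw);
 */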
1512
1513/**
1514 * regmap_field_write(): Write a value to a single register field
1515 *
1516 * @field: Register field to write to
1517 * @val: Value to be written
1518 *
1519 * A value of zero will be returned on success, a negative errno will
1520 * be returned in error cases.
1521 */
1522int regmap_field_write(struct regmap_field *field, unsigned int val)
1523{
1524 return regmap_update_bits(field->regmap, field->reg,
1525 field->mask, val << field->shift);
1526}
1527EXPORT_SYMBOL_GPL(regmap_field_write);
1528
1529/**
1530 * regmap_field_update_bits(): Perform a read/modify/write cycle
1531 * on the register field
1532 *
1533 * @field: Register field to write to
1534 * @mask: Bitmask to change
1535 * @val: Value to be written
1536 *
1537 * A value of zero will be returned on success, a negative errno will
1538 * be returned in error cases.
1539 */
1540int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
1541{
1542 mask = (mask << field->shift) & field->mask;
1543
1544 return regmap_update_bits(field->regmap, field->reg,
1545 mask, val << field->shift);
1546}
1547EXPORT_SYMBOL_GPL(regmap_field_update_bits);
1548
1549/**
1550 * regmap_fields_write(): Write a value to a single register field with port ID
1551 *
1552 * @field: Register field to write to
1553 * @id: port ID
1554 * @val: Value to be written
1555 *
1556 * A value of zero will be returned on success, a negative errno will
1557 * be returned in error cases.
1558 */
1559int regmap_fields_write(struct regmap_field *field, unsigned int id,
1560 unsigned int val)
1561{
1562 if (id >= field->id_size)
1563 return -EINVAL;
1564
1565 return regmap_update_bits(field->regmap,
1566 field->reg + (field->id_offset * id),
1567 field->mask, val << field->shift);
1568}
1569EXPORT_SYMBOL_GPL(regmap_fields_write);
1570
1571/**
1572 * regmap_fields_update_bits(): Perform a read/modify/write cycle
1573 * on the register field
1574 *
1575 * @field: Register field to write to
1576 * @id: port ID
1577 * @mask: Bitmask to change
1578 * @val: Value to be written
1579 *
1580 * A value of zero will be returned on success, a negative errno will
1581 * be returned in error cases.
1582 */
1583int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
1584 unsigned int mask, unsigned int val)
1585{
1586 if (id >= field->id_size)
1587 return -EINVAL;
1588
1589 mask = (mask << field->shift) & field->mask;
1590
1591 return regmap_update_bits(field->regmap,
1592 field->reg + (field->id_offset * id),
1593 mask, val << field->shift);
1594}
1595EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
1596
1597/*
1598 * regmap_bulk_write(): Write multiple registers to the device
1599 *
1600 * @map: Register map to write to
1601 * @reg: First register to be write from
1602 * @val: Block of data to be written, in native register size for device
1603 * @val_count: Number of registers to write
1604 *
1605 * This function is intended to be used for writing a large block of
1606 * data to the device either in a single transfer or multiple transfers.
1607 *
1608 * A value of zero will be returned on success, a negative errno will
1609 * be returned in error cases.
1610 */
1611int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1612 size_t val_count)
1613{
1614 int ret = 0, i;
1615 size_t val_bytes = map->format.val_bytes;
1616
1617 if (map->bus && !map->format.parse_inplace)
1618 return -EINVAL;
1619 if (reg % map->reg_stride)
1620 return -EINVAL;
1621
1622 /*
1623 * Some devices don't support bulk write; for
1624 * them we have a series of single write operations.
1625 */
1626 if (!map->bus || map->use_single_rw) {
1627 map->lock(map->lock_arg);
1628 for (i = 0; i < val_count; i++) {
1629 unsigned int ival;
1630
1631 switch (val_bytes) {
1632 case 1:
1633 ival = *(u8 *)(val + (i * val_bytes));
1634 break;
1635 case 2:
1636 ival = *(u16 *)(val + (i * val_bytes));
1637 break;
1638 case 4:
1639 ival = *(u32 *)(val + (i * val_bytes));
1640 break;
1641#ifdef CONFIG_64BIT
1642 case 8:
1643 ival = *(u64 *)(val + (i * val_bytes));
1644 break;
1645#endif
1646 default:
1647 ret = -EINVAL;
1648 goto out;
1649 }
1650
1651 ret = _regmap_write(map, reg + (i * map->reg_stride),
1652 ival);
1653 if (ret != 0)
1654 goto out;
1655 }
1656out:
1657 map->unlock(map->lock_arg);
1658 } else {
1659 void *wval;
1660
1661 wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
1662 if (!wval) {
1663 dev_err(map->dev, "Error in memory allocation\n");
1664 return -ENOMEM;
1665 }
1666 for (i = 0; i < val_count * val_bytes; i += val_bytes)
1667 map->format.parse_inplace(wval + i);
1668
1669 map->lock(map->lock_arg);
1670 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
1671 map->unlock(map->lock_arg);
1672
1673 kfree(wval);
1674 }
1675 return ret;
1676}
1677EXPORT_SYMBOL_GPL(regmap_bulk_write);
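/*
 * Illustrative sketch (not part of the original file): writing a block of
 * values kept in native (CPU) register size; regmap handles any per-value
 * byte swapping. The "foo_/FOO_" names are hypothetical.
 *
 *	u16 coeffs[8];
 *
 *	foo_fill_coeffs(coeffs);		// hypothetical helper
 *	ret = regmap_bulk_write(map, FOO_REG_COEFF_BASE, coeffs,
 *				ARRAY_SIZE(coeffs));
 */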
1678
1679/*
1680 * _regmap_raw_multi_reg_write()
1681 *
1682 * the (register,newvalue) pairs in regs have not been formatted, but
1683 * they are all in the same page and have been changed to being page
1684 * relative. The page register has been written if that was necessary.
1685 */
1686static int _regmap_raw_multi_reg_write(struct regmap *map,
1687 const struct reg_default *regs,
1688 size_t num_regs)
1689{
1690 int ret;
1691 void *buf;
1692 int i;
1693 u8 *u8;
1694 size_t val_bytes = map->format.val_bytes;
1695 size_t reg_bytes = map->format.reg_bytes;
1696 size_t pad_bytes = map->format.pad_bytes;
1697 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
1698 size_t len = pair_size * num_regs;
1699
1700 if (!len)
1701 return -EINVAL;
1702
e894c3f4
OAO
1703 buf = kzalloc(len, GFP_KERNEL);
1704 if (!buf)
1705 return -ENOMEM;
1706
1707 /* We have to linearise by hand. */
1708
1709 u8 = buf;
1710
1711 for (i = 0; i < num_regs; i++) {
1712 int reg = regs[i].reg;
1713 int val = regs[i].def;
1714 trace_regmap_hw_write_start(map->dev, reg, 1);
1715 map->format.format_reg(u8, reg, map->reg_shift);
1716 u8 += reg_bytes + pad_bytes;
1717 map->format.format_val(u8, val, 0);
1718 u8 += val_bytes;
1719 }
1720 u8 = buf;
1721 *u8 |= map->write_flag_mask;
1722
1723 ret = map->bus->write(map->bus_context, buf, len);
1724
1725 kfree(buf);
1726
1727 for (i = 0; i < num_regs; i++) {
1728 int reg = regs[i].reg;
1729 trace_regmap_hw_write_done(map->dev, reg, 1);
1730 }
1731 return ret;
1732}
1733
1734static unsigned int _regmap_register_page(struct regmap *map,
1735 unsigned int reg,
1736 struct regmap_range_node *range)
1737{
1738 unsigned int win_page = (reg - range->range_min) / range->window_len;
1739
1740 return win_page;
1741}
1742
1743static int _regmap_range_multi_paged_reg_write(struct regmap *map,
1744 struct reg_default *regs,
1745 size_t num_regs)
1746{
1747 int ret;
1748 int i, n;
1749 struct reg_default *base;
1750 unsigned int this_page = 0;
1751 /*
1752 * The set of registers is not necessarily in order, but
1753 * since the order of writes must be preserved this algorithm
1754 * chops the set each time the page changes
1755 */
1756 base = regs;
1757 for (i = 0, n = 0; i < num_regs; i++, n++) {
1758 unsigned int reg = regs[i].reg;
1759 struct regmap_range_node *range;
1760
1761 range = _regmap_range_lookup(map, reg);
1762 if (range) {
1763 unsigned int win_page = _regmap_register_page(map, reg,
1764 range);
1765
1766 if (i == 0)
1767 this_page = win_page;
1768 if (win_page != this_page) {
1769 this_page = win_page;
1770 ret = _regmap_raw_multi_reg_write(map, base, n);
1771 if (ret != 0)
1772 return ret;
1773 base += n;
1774 n = 0;
1775 }
1776 ret = _regmap_select_page(map, &base[n].reg, range, 1);
1777 if (ret != 0)
1778 return ret;
1779 }
1780 }
1781 if (n > 0)
1782 return _regmap_raw_multi_reg_write(map, base, n);
1783 return 0;
1784}
1785
1786static int _regmap_multi_reg_write(struct regmap *map,
1787 const struct reg_default *regs,
1788 size_t num_regs)
1789{
1790 int i;
1791 int ret;
1792
1793 if (!map->can_multi_write) {
1794 for (i = 0; i < num_regs; i++) {
1795 ret = _regmap_write(map, regs[i].reg, regs[i].def);
1796 if (ret != 0)
1797 return ret;
1798 }
1799 return 0;
1800 }
1801
1802 if (!map->format.parse_inplace)
1803 return -EINVAL;
1804
1805 if (map->writeable_reg)
1806 for (i = 0; i < num_regs; i++) {
1807 int reg = regs[i].reg;
1808 if (!map->writeable_reg(map->dev, reg))
1809 return -EINVAL;
1810 if (reg % map->reg_stride)
1811 return -EINVAL;
1812 }
1813
1814 if (!map->cache_bypass) {
1815 for (i = 0; i < num_regs; i++) {
1816 unsigned int val = regs[i].def;
1817 unsigned int reg = regs[i].reg;
1818 ret = regcache_write(map, reg, val);
1819 if (ret) {
1820 dev_err(map->dev,
1821 "Error in caching of register: %x ret: %d\n",
1822 reg, ret);
1823 return ret;
1824 }
1825 }
1826 if (map->cache_only) {
1827 map->cache_dirty = true;
1828 return 0;
1829 }
1830 }
1831
1832 WARN_ON(!map->bus);
1833
1834 for (i = 0; i < num_regs; i++) {
1835 unsigned int reg = regs[i].reg;
1836 struct regmap_range_node *range;
1837 range = _regmap_range_lookup(map, reg);
1838 if (range) {
1839 size_t len = sizeof(struct reg_default)*num_regs;
1840 struct reg_default *base = kmemdup(regs, len,
1841 GFP_KERNEL);
1842 if (!base)
1843 return -ENOMEM;
1844 ret = _regmap_range_multi_paged_reg_write(map, base,
1845 num_regs);
1846 kfree(base);
1847
1848 return ret;
1849 }
1850 }
1851 return _regmap_raw_multi_reg_write(map, regs, num_regs);
1852}
1853
1854/*
1855 * regmap_multi_reg_write(): Write multiple registers to the device
1856 *
1857 * where the set of register,value pairs are supplied in any order,
1858 * possibly not all in a single range.
1859 *
1860 * @map: Register map to write to
1861 * @regs: Array of structures containing register,value to be written
1862 * @num_regs: Number of registers to write
1863 *
1864 * The 'normal' block write mode will ultimately send data on the
1865 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
1866 * addressed. However, this alternative block multi write mode will send
1867 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
1868 * must of course support the mode.
1869 *
1870 * A value of zero will be returned on success, a negative errno will be
1871 * returned in error cases.
1872 */
1873int regmap_multi_reg_write(struct regmap *map, const struct reg_default *regs,
1874 int num_regs)
1875{
1876 int ret;
1877
1878 map->lock(map->lock_arg);
1879
1880 ret = _regmap_multi_reg_write(map, regs, num_regs);
1881
1882 map->unlock(map->lock_arg);
1883
1884 return ret;
1885}
1886EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
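/*
 * Illustrative sketch (not part of the original file): a scattered set of
 * register/value pairs written in order with regmap_multi_reg_write()
 * (hypothetical "FOO_" registers):
 *
 *	static const struct reg_default foo_patch[] = {
 *		{ .reg = FOO_REG_PLL,  .def = 0x0021 },
 *		{ .reg = FOO_REG_CLK,  .def = 0x0003 },
 *		{ .reg = FOO_REG_MUTE, .def = 0x0000 },
 *	};
 *
 *	ret = regmap_multi_reg_write(map, foo_patch, ARRAY_SIZE(foo_patch));
 */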
1887
1888/*
1889 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
1890 * device but not the cache
1891 *
1892 * where the set of registers are supplied in any order
1893 *
1894 * @map: Register map to write to
1895 * @regs: Array of structures containing register,value to be written
1896 * @num_regs: Number of registers to write
1897 *
1898 * This function is intended to be used for writing a large block of data
1899 * atomically to the device in a single transfer for those I2C client devices
1900 * that implement this alternative block write mode.
1901 *
1902 * A value of zero will be returned on success, a negative errno will
1903 * be returned in error cases.
1904 */
1d5b40bc
CK
1905int regmap_multi_reg_write_bypassed(struct regmap *map,
1906 const struct reg_default *regs,
1907 int num_regs)
e33fabd3 1908{
1d5b40bc
CK
1909 int ret;
1910 bool bypass;
e33fabd3
AO
1911
1912 map->lock(map->lock_arg);
1913
1d5b40bc
CK
1914 bypass = map->cache_bypass;
1915 map->cache_bypass = true;
1916
1917 ret = _regmap_multi_reg_write(map, regs, num_regs);
1918
1919 map->cache_bypass = bypass;
1920
e33fabd3
AO
1921 map->unlock(map->lock_arg);
1922
1923 return ret;
1924}
1d5b40bc 1925EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
e33fabd3 1926
0d509f2b
MB
1927/**
1928 * regmap_raw_write_async(): Write raw values to one or more registers
1929 * asynchronously
1930 *
1931 * @map: Register map to write to
1932 * @reg: Initial register to write to
1933 * @val: Block of data to be written, laid out for direct transmission to the
1934 * device. Must be valid until regmap_async_complete() is called.
1935 * @val_len: Length of data pointed to by val.
1936 *
1937 * This function is intended to be used for things like firmware
1938 * download where a large block of data needs to be transferred to the
1939 * device. No formatting will be done on the data provided.
1940 *
1941 * If supported by the underlying bus the write will be scheduled
1942 * asynchronously, helping maximise I/O speed on higher speed buses
1943 * like SPI. regmap_async_complete() can be called to ensure that all
1944 * asynchronous writes have been completed.
1945 *
1946 * A value of zero will be returned on success, a negative errno will
1947 * be returned in error cases.
1948 */
1949int regmap_raw_write_async(struct regmap *map, unsigned int reg,
1950 const void *val, size_t val_len)
1951{
1952 int ret;
1953
1954 if (val_len % map->format.val_bytes)
1955 return -EINVAL;
1956 if (reg % map->reg_stride)
1957 return -EINVAL;
1958
1959 map->lock(map->lock_arg);
1960
0a819809
MB
1961 map->async = true;
1962
1963 ret = _regmap_raw_write(map, reg, val, val_len);
1964
1965 map->async = false;
0d509f2b
MB
1966
1967 map->unlock(map->lock_arg);
1968
1969 return ret;
1970}
1971EXPORT_SYMBOL_GPL(regmap_raw_write_async);
1972
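/*
 * Illustrative sketch, not part of regmap.c: streaming a firmware image with
 * regmap_raw_write_async() and then waiting for completion. The 0x100 base
 * register is an assumed address and <linux/regmap.h> is assumed to be
 * included; the buffer must stay valid until regmap_async_complete() returns.
 */
static int example_download_firmware(struct regmap *map, const void *fw,
				     size_t len)
{
	int ret;

	/* len must be a whole number of register values for the device */
	ret = regmap_raw_write_async(map, 0x100, fw, len);
	if (ret != 0)
		return ret;

	/* Block until every scheduled asynchronous transfer has finished */
	return regmap_async_complete(map);
}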
b83a313b
MB
1973static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1974 unsigned int val_len)
1975{
98bc7dfd 1976 struct regmap_range_node *range;
b83a313b
MB
1977 u8 *u8 = map->work_buf;
1978 int ret;
1979
f1b5c5c3 1980 WARN_ON(!map->bus);
d2a5884a 1981
98bc7dfd
MB
1982 range = _regmap_range_lookup(map, reg);
1983 if (range) {
1984 ret = _regmap_select_page(map, &reg, range,
1985 val_len / map->format.val_bytes);
0ff3e62f 1986 if (ret != 0)
98bc7dfd
MB
1987 return ret;
1988 }
6863ca62 1989
d939fb9a 1990 map->format.format_reg(map->work_buf, reg, map->reg_shift);
b83a313b
MB
1991
1992 /*
6f306441 1993 * Some buses or devices flag reads by setting the high bits in the
b83a313b
MB
1994 * register address; since it's always the high bits for all
1995 * current formats we can do this here rather than in
1996 * formatting. This may break if we get interesting formats.
1997 */
6f306441 1998 u8[0] |= map->read_flag_mask;
b83a313b 1999
fb2736bb
MB
2000 trace_regmap_hw_read_start(map->dev, reg,
2001 val_len / map->format.val_bytes);
2002
0135bbcc 2003 ret = map->bus->read(map->bus_context, map->work_buf,
82159ba8 2004 map->format.reg_bytes + map->format.pad_bytes,
40c5cc26 2005 val, val_len);
b83a313b 2006
fb2736bb
MB
2007 trace_regmap_hw_read_done(map->dev, reg,
2008 val_len / map->format.val_bytes);
2009
2010 return ret;
b83a313b
MB
2011}
2012
3ac17037
BB
2013static int _regmap_bus_reg_read(void *context, unsigned int reg,
2014 unsigned int *val)
2015{
2016 struct regmap *map = context;
2017
2018 return map->bus->reg_read(map->bus_context, reg, val);
2019}
2020
ad278406
AS
2021static int _regmap_bus_read(void *context, unsigned int reg,
2022 unsigned int *val)
2023{
2024 int ret;
2025 struct regmap *map = context;
2026
2027 if (!map->format.parse_val)
2028 return -EINVAL;
2029
2030 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
2031 if (ret == 0)
2032 *val = map->format.parse_val(map->work_buf);
2033
2034 return ret;
2035}
2036
b83a313b
MB
2037static int _regmap_read(struct regmap *map, unsigned int reg,
2038 unsigned int *val)
2039{
2040 int ret;
d2a5884a
AS
2041 void *context = _regmap_map_get_context(map);
2042
f1b5c5c3 2043 WARN_ON(!map->reg_read);
b83a313b 2044
5d1729e7
DP
2045 if (!map->cache_bypass) {
2046 ret = regcache_read(map, reg, val);
2047 if (ret == 0)
2048 return 0;
2049 }
2050
2051 if (map->cache_only)
2052 return -EBUSY;
2053
d4807ad2
MS
2054 if (!regmap_readable(map, reg))
2055 return -EIO;
2056
d2a5884a 2057 ret = map->reg_read(context, reg, val);
fb2736bb 2058 if (ret == 0) {
1044c180
MB
2059#ifdef LOG_DEVICE
2060 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
2061 dev_info(map->dev, "%x => %x\n", reg, *val);
2062#endif
2063
fb2736bb 2064 trace_regmap_reg_read(map->dev, reg, *val);
b83a313b 2065
ad278406
AS
2066 if (!map->cache_bypass)
2067 regcache_write(map, reg, *val);
2068 }
f2985367 2069
b83a313b
MB
2070 return ret;
2071}
2072
2073/**
2074 * regmap_read(): Read a value from a single register
2075 *
0093380c 2076 * @map: Register map to read from
b83a313b
MB
2077 * @reg: Register to be read from
2078 * @val: Pointer to store read value
2079 *
2080 * A value of zero will be returned on success, a negative errno will
2081 * be returned in error cases.
2082 */
2083int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2084{
2085 int ret;
2086
f01ee60f
SW
2087 if (reg % map->reg_stride)
2088 return -EINVAL;
2089
0d4529c5 2090 map->lock(map->lock_arg);
b83a313b
MB
2091
2092 ret = _regmap_read(map, reg, val);
2093
0d4529c5 2094 map->unlock(map->lock_arg);
b83a313b
MB
2095
2096 return ret;
2097}
2098EXPORT_SYMBOL_GPL(regmap_read);
2099
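/*
 * Illustrative sketch, not part of regmap.c: a minimal regmap_read() call.
 * The 0x02 status register is an assumed, device-specific address used only
 * for illustration.
 */
static int example_read_status(struct regmap *map, unsigned int *status)
{
	/* Served from the cache when possible, otherwise from the bus */
	return regmap_read(map, 0x02, status);
}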
2100/**
2101 * regmap_raw_read(): Read raw data from the device
2102 *
0093380c 2103 * @map: Register map to read from
b83a313b
MB
2104 * @reg: First register to be read from
2105 * @val: Pointer to store read value
2106 * @val_len: Size of data to read
2107 *
2108 * A value of zero will be returned on success, a negative errno will
2109 * be returned in error cases.
2110 */
2111int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2112 size_t val_len)
2113{
b8fb5ab1
MB
2114 size_t val_bytes = map->format.val_bytes;
2115 size_t val_count = val_len / val_bytes;
2116 unsigned int v;
2117 int ret, i;
04e016ad 2118
d2a5884a
AS
2119 if (!map->bus)
2120 return -EINVAL;
851960ba
SW
2121 if (val_len % map->format.val_bytes)
2122 return -EINVAL;
f01ee60f
SW
2123 if (reg % map->reg_stride)
2124 return -EINVAL;
851960ba 2125
0d4529c5 2126 map->lock(map->lock_arg);
b83a313b 2127
b8fb5ab1
MB
2128 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2129 map->cache_type == REGCACHE_NONE) {
2130 /* Physical block read if there's no cache involved */
2131 ret = _regmap_raw_read(map, reg, val, val_len);
2132
2133 } else {
2134 /* Otherwise go word by word for the cache; should be low
2135 * cost as we expect to hit the cache.
2136 */
2137 for (i = 0; i < val_count; i++) {
f01ee60f
SW
2138 ret = _regmap_read(map, reg + (i * map->reg_stride),
2139 &v);
b8fb5ab1
MB
2140 if (ret != 0)
2141 goto out;
2142
d939fb9a 2143 map->format.format_val(val + (i * val_bytes), v, 0);
b8fb5ab1
MB
2144 }
2145 }
b83a313b 2146
b8fb5ab1 2147 out:
0d4529c5 2148 map->unlock(map->lock_arg);
b83a313b
MB
2149
2150 return ret;
2151}
2152EXPORT_SYMBOL_GPL(regmap_raw_read);
2153
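/*
 * Illustrative sketch, not part of regmap.c: reading a block of registers in
 * bus format with regmap_raw_read(). The 0x10 base address is an assumed
 * value; the length check mirrors the constraint enforced by the API.
 */
static int example_raw_dump(struct regmap *map, void *buf, size_t len)
{
	int val_bytes = regmap_get_val_bytes(map);

	/* Raw reads must cover whole register values */
	if (val_bytes <= 0 || len % val_bytes)
		return -EINVAL;

	return regmap_raw_read(map, 0x10, buf, len);
}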
67252287
SK
2154/**
2155 * regmap_field_read(): Read a value from a single register field
2156 *
2157 * @field: Register field to read from
2158 * @val: Pointer to store read value
2159 *
2160 * A value of zero will be returned on success, a negative errno will
2161 * be returned in error cases.
2162 */
2163int regmap_field_read(struct regmap_field *field, unsigned int *val)
2164{
2165 int ret;
2166 unsigned int reg_val;
2167 ret = regmap_read(field->regmap, field->reg, &reg_val);
2168 if (ret != 0)
2169 return ret;
2170
2171 reg_val &= field->mask;
2172 reg_val >>= field->shift;
2173 *val = reg_val;
2174
2175 return ret;
2176}
2177EXPORT_SYMBOL_GPL(regmap_field_read);
2178
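/*
 * Illustrative sketch, not part of regmap.c: reading one bitfield through the
 * regmap_field API. The 0x30 register and the bit range [6:4] are invented
 * for this example; a real driver would describe its own field layout.
 */
static int example_read_mux(struct device *dev, struct regmap *map,
			    unsigned int *sel)
{
	struct reg_field mux = REG_FIELD(0x30, 4, 6);
	struct regmap_field *mux_field;

	mux_field = devm_regmap_field_alloc(dev, map, mux);
	if (IS_ERR(mux_field))
		return PTR_ERR(mux_field);

	/* The result is already masked and shifted down to bit 0 */
	return regmap_field_read(mux_field, sel);
}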
a0102375
KM
2179/**
2180 * regmap_fields_read(): Read a value from a single register field with port ID
2181 *
2182 * @field: Register field to read from
2183 * @id: port ID
2184 * @val: Pointer to store read value
2185 *
2186 * A value of zero will be returned on success, a negative errno will
2187 * be returned in error cases.
2188 */
2189int regmap_fields_read(struct regmap_field *field, unsigned int id,
2190 unsigned int *val)
2191{
2192 int ret;
2193 unsigned int reg_val;
2194
2195 if (id >= field->id_size)
2196 return -EINVAL;
2197
2198 ret = regmap_read(field->regmap,
2199 field->reg + (field->id_offset * id),
2200 &reg_val);
2201 if (ret != 0)
2202 return ret;
2203
2204 reg_val &= field->mask;
2205 reg_val >>= field->shift;
2206 *val = reg_val;
2207
2208 return ret;
2209}
2210EXPORT_SYMBOL_GPL(regmap_fields_read);
2211
b83a313b
MB
2212/**
2213 * regmap_bulk_read(): Read multiple registers from the device
2214 *
0093380c 2215 * @map: Register map to read from
b83a313b
MB
2216 * @reg: First register to be read from
2217 * @val: Pointer to store read value, in native register size for device
2218 * @val_count: Number of registers to read
2219 *
2220 * A value of zero will be returned on success, a negative errno will
2221 * be returned in error cases.
2222 */
2223int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2224 size_t val_count)
2225{
2226 int ret, i;
2227 size_t val_bytes = map->format.val_bytes;
82cd9965 2228 bool vol = regmap_volatile_range(map, reg, val_count);
5d1729e7 2229
f01ee60f
SW
2230 if (reg % map->reg_stride)
2231 return -EINVAL;
b83a313b 2232
3b58ee13 2233 if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2e33caf1
AJ
2234 /*
2235 * Some devices do not support bulk read; for
2236 * them we fall back to a series of single read operations.
2237 */
2238 if (map->use_single_rw) {
2239 for (i = 0; i < val_count; i++) {
2240 ret = regmap_raw_read(map,
2241 reg + (i * map->reg_stride),
2242 val + (i * val_bytes),
2243 val_bytes);
2244 if (ret != 0)
2245 return ret;
2246 }
2247 } else {
2248 ret = regmap_raw_read(map, reg, val,
2249 val_bytes * val_count);
2250 if (ret != 0)
2251 return ret;
2252 }
de2d808f
MB
2253
2254 for (i = 0; i < val_count * val_bytes; i += val_bytes)
8a819ff8 2255 map->format.parse_inplace(val + i);
de2d808f
MB
2256 } else {
2257 for (i = 0; i < val_count; i++) {
6560ffd1 2258 unsigned int ival;
f01ee60f 2259 ret = regmap_read(map, reg + (i * map->reg_stride),
25061d28 2260 &ival);
de2d808f
MB
2261 if (ret != 0)
2262 return ret;
6560ffd1 2263 memcpy(val + (i * val_bytes), &ival, val_bytes);
de2d808f
MB
2264 }
2265 }
b83a313b
MB
2266
2267 return 0;
2268}
2269EXPORT_SYMBOL_GPL(regmap_bulk_read);
2270
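/*
 * Illustrative sketch, not part of regmap.c: fetching several consecutive
 * registers with regmap_bulk_read(). Values are returned in native register
 * size, so a device with 16-bit registers fills a u16 array. The 0x20 base
 * register and the count of 8 are assumed values.
 */
static int example_read_coefficients(struct regmap *map, u16 *coef)
{
	return regmap_bulk_read(map, 0x20, coef, 8);
}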
018690d3
MB
2271static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2272 unsigned int mask, unsigned int val,
2273 bool *change)
b83a313b
MB
2274{
2275 int ret;
d91e8db2 2276 unsigned int tmp, orig;
b83a313b 2277
d91e8db2 2278 ret = _regmap_read(map, reg, &orig);
b83a313b 2279 if (ret != 0)
fc3ebd78 2280 return ret;
b83a313b 2281
d91e8db2 2282 tmp = orig & ~mask;
b83a313b
MB
2283 tmp |= val & mask;
2284
018690d3 2285 if (tmp != orig) {
d91e8db2 2286 ret = _regmap_write(map, reg, tmp);
e2f74dc6
XL
2287 if (change)
2288 *change = true;
018690d3 2289 } else {
e2f74dc6
XL
2290 if (change)
2291 *change = false;
018690d3 2292 }
b83a313b 2293
b83a313b
MB
2294 return ret;
2295}
018690d3
MB
2296
2297/**
2298 * regmap_update_bits: Perform a read/modify/write cycle on the register map
2299 *
2300 * @map: Register map to update
2301 * @reg: Register to update
2302 * @mask: Bitmask to change
2303 * @val: New value for bitmask
2304 *
2305 * Returns zero for success, a negative number on error.
2306 */
2307int regmap_update_bits(struct regmap *map, unsigned int reg,
2308 unsigned int mask, unsigned int val)
2309{
fc3ebd78
KG
2310 int ret;
2311
0d4529c5 2312 map->lock(map->lock_arg);
e2f74dc6 2313 ret = _regmap_update_bits(map, reg, mask, val, NULL);
0d4529c5 2314 map->unlock(map->lock_arg);
fc3ebd78
KG
2315
2316 return ret;
018690d3 2317}
b83a313b 2318EXPORT_SYMBOL_GPL(regmap_update_bits);
31244e39 2319
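/*
 * Illustrative sketch, not part of regmap.c: a read/modify/write with
 * regmap_update_bits(). EXAMPLE_CTRL_REG and EXAMPLE_EN are invented names
 * for this sketch, not real device definitions.
 */
#define EXAMPLE_CTRL_REG	0x01
#define EXAMPLE_EN		BIT(3)

static int example_set_enable(struct regmap *map, bool enable)
{
	/* Only the masked bit changes; all other bits keep their value */
	return regmap_update_bits(map, EXAMPLE_CTRL_REG, EXAMPLE_EN,
				  enable ? EXAMPLE_EN : 0);
}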
915f441b
MB
2320/**
2321 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
2322 * map asynchronously
2323 *
2324 * @map: Register map to update
2325 * @reg: Register to update
2326 * @mask: Bitmask to change
2327 * @val: New value for bitmask
2328 *
2329 * With most buses the read must be done synchronously so this is most
2330 * useful for devices with a cache which do not need to interact with
2331 * the hardware to determine the current register value.
2332 *
2333 * Returns zero for success, a negative number on error.
2334 */
2335int regmap_update_bits_async(struct regmap *map, unsigned int reg,
2336 unsigned int mask, unsigned int val)
2337{
915f441b
MB
2338 int ret;
2339
2340 map->lock(map->lock_arg);
2341
2342 map->async = true;
2343
e2f74dc6 2344 ret = _regmap_update_bits(map, reg, mask, val, NULL);
915f441b
MB
2345
2346 map->async = false;
2347
2348 map->unlock(map->lock_arg);
2349
2350 return ret;
2351}
2352EXPORT_SYMBOL_GPL(regmap_update_bits_async);
2353
018690d3
MB
2354/**
2355 * regmap_update_bits_check: Perform a read/modify/write cycle on the
2356 * register map and report if updated
2357 *
2358 * @map: Register map to update
2359 * @reg: Register to update
2360 * @mask: Bitmask to change
2361 * @val: New value for bitmask
2362 * @change: Boolean indicating if a write was done
2363 *
2364 * Returns zero for success, a negative number on error.
2365 */
2366int regmap_update_bits_check(struct regmap *map, unsigned int reg,
2367 unsigned int mask, unsigned int val,
2368 bool *change)
2369{
fc3ebd78
KG
2370 int ret;
2371
0d4529c5 2372 map->lock(map->lock_arg);
fc3ebd78 2373 ret = _regmap_update_bits(map, reg, mask, val, change);
0d4529c5 2374 map->unlock(map->lock_arg);
fc3ebd78 2375 return ret;
018690d3
MB
2376}
2377EXPORT_SYMBOL_GPL(regmap_update_bits_check);
2378
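/*
 * Illustrative sketch, not part of regmap.c: using regmap_update_bits_check()
 * to learn whether the register actually changed, for example to decide if a
 * settle delay is needed. The 0x05 register, the divider mask and the delay
 * are all assumptions for this example.
 */
static int example_set_clock_div(struct regmap *map, unsigned int div)
{
	bool changed;
	int ret;

	ret = regmap_update_bits_check(map, 0x05, 0x0f, div & 0x0f, &changed);
	if (ret != 0)
		return ret;

	if (changed)
		usleep_range(2000, 2500);	/* let the new divider settle */

	return 0;
}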
915f441b
MB
2379/**
2380 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
2381 * register map asynchronously and report if
2382 * updated
2383 *
2384 * @map: Register map to update
2385 * @reg: Register to update
2386 * @mask: Bitmask to change
2387 * @val: New value for bitmask
2388 * @change: Boolean indicating if a write was done
2389 *
2390 * With most buses the read must be done synchronously so this is most
2391 * useful for devices with a cache which do not need to interact with
2392 * the hardware to determine the current register value.
2393 *
2394 * Returns zero for success, a negative number on error.
2395 */
2396int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
2397 unsigned int mask, unsigned int val,
2398 bool *change)
2399{
2400 int ret;
2401
2402 map->lock(map->lock_arg);
2403
2404 map->async = true;
2405
2406 ret = _regmap_update_bits(map, reg, mask, val, change);
2407
2408 map->async = false;
2409
2410 map->unlock(map->lock_arg);
2411
2412 return ret;
2413}
2414EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
2415
0d509f2b
MB
2416void regmap_async_complete_cb(struct regmap_async *async, int ret)
2417{
2418 struct regmap *map = async->map;
2419 bool wake;
2420
fe7d4ccd
MB
2421 trace_regmap_async_io_complete(map->dev);
2422
0d509f2b 2423 spin_lock(&map->async_lock);
7e09a979 2424 list_move(&async->list, &map->async_free);
0d509f2b
MB
2425 wake = list_empty(&map->async_list);
2426
2427 if (ret != 0)
2428 map->async_ret = ret;
2429
2430 spin_unlock(&map->async_lock);
2431
0d509f2b
MB
2432 if (wake)
2433 wake_up(&map->async_waitq);
2434}
f804fb56 2435EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
0d509f2b
MB
2436
2437static int regmap_async_is_done(struct regmap *map)
2438{
2439 unsigned long flags;
2440 int ret;
2441
2442 spin_lock_irqsave(&map->async_lock, flags);
2443 ret = list_empty(&map->async_list);
2444 spin_unlock_irqrestore(&map->async_lock, flags);
2445
2446 return ret;
2447}
2448
2449/**
2450 * regmap_async_complete: Ensure all asynchronous I/O has completed.
2451 *
2452 * @map: Map to operate on.
2453 *
2454 * Blocks until any pending asynchronous I/O has completed. Returns
2455 * an error code for any failed I/O operations.
2456 */
2457int regmap_async_complete(struct regmap *map)
2458{
2459 unsigned long flags;
2460 int ret;
2461
2462 /* Nothing to do with no async support */
f2e055e7 2463 if (!map->bus || !map->bus->async_write)
0d509f2b
MB
2464 return 0;
2465
fe7d4ccd
MB
2466 trace_regmap_async_complete_start(map->dev);
2467
0d509f2b
MB
2468 wait_event(map->async_waitq, regmap_async_is_done(map));
2469
2470 spin_lock_irqsave(&map->async_lock, flags);
2471 ret = map->async_ret;
2472 map->async_ret = 0;
2473 spin_unlock_irqrestore(&map->async_lock, flags);
2474
fe7d4ccd
MB
2475 trace_regmap_async_complete_done(map->dev);
2476
0d509f2b
MB
2477 return ret;
2478}
f88948ef 2479EXPORT_SYMBOL_GPL(regmap_async_complete);
0d509f2b 2480
22f0d90a
MB
2481/**
2482 * regmap_register_patch: Register and apply register updates to be applied
2483 * on device initialisation
2484 *
2485 * @map: Register map to apply updates to.
2486 * @regs: Values to update.
2487 * @num_regs: Number of entries in regs.
2488 *
2489 * Register a set of register updates to be applied to the device
2490 * whenever the device registers are synchronised with the cache and
2491 * apply them immediately. Typically this is used to apply
2492 * corrections to be applied to the device defaults on startup, such
2493 * as the updates some vendors provide to undocumented registers.
56fb1c74
MB
2494 *
2495 * The caller must ensure that this function cannot be called
2496 * concurrently with either itself or regcache_sync().
22f0d90a
MB
2497 */
2498int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
2499 int num_regs)
2500{
aab13ebc 2501 struct reg_default *p;
6bf13103 2502 int ret;
22f0d90a
MB
2503 bool bypass;
2504
bd60e381
CZ
2505 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
2506 num_regs))
2507 return 0;
2508
aab13ebc
MB
2509 p = krealloc(map->patch,
2510 sizeof(struct reg_default) * (map->patch_regs + num_regs),
2511 GFP_KERNEL);
2512 if (p) {
2513 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
2514 map->patch = p;
2515 map->patch_regs += num_regs;
22f0d90a 2516 } else {
56fb1c74 2517 return -ENOMEM;
22f0d90a
MB
2518 }
2519
0d4529c5 2520 map->lock(map->lock_arg);
22f0d90a
MB
2521
2522 bypass = map->cache_bypass;
2523
2524 map->cache_bypass = true;
1a25f261 2525 map->async = true;
22f0d90a 2526
6bf13103
CK
2527 ret = _regmap_multi_reg_write(map, regs, num_regs);
2528 if (ret != 0)
2529 goto out;
22f0d90a 2530
22f0d90a 2531out:
1a25f261 2532 map->async = false;
22f0d90a
MB
2533 map->cache_bypass = bypass;
2534
0d4529c5 2535 map->unlock(map->lock_arg);
22f0d90a 2536
1a25f261
MB
2537 regmap_async_complete(map);
2538
22f0d90a
MB
2539 return ret;
2540}
2541EXPORT_SYMBOL_GPL(regmap_register_patch);
2542
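/*
 * Illustrative sketch, not part of regmap.c: registering an errata patch so
 * it is reapplied whenever the cache is synchronised. The undocumented
 * register addresses and values are placeholders, not taken from any real
 * datasheet.
 */
static int example_apply_errata(struct regmap *map)
{
	static const struct reg_default example_patch[] = {
		{ .reg = 0xf0, .def = 0x0021 },
		{ .reg = 0xf3, .def = 0x8000 },
	};

	return regmap_register_patch(map, example_patch,
				     ARRAY_SIZE(example_patch));
}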
eae4b51b 2543/*
a6539c32
MB
2544 * regmap_get_val_bytes(): Report the size of a register value
2545 *
2546 * Report the size of a register value, mainly intended for use by
2547 * generic infrastructure built on top of regmap.
2548 */
2549int regmap_get_val_bytes(struct regmap *map)
2550{
2551 if (map->format.format_write)
2552 return -EINVAL;
2553
2554 return map->format.val_bytes;
2555}
2556EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2557
13ff50c8
NC
2558int regmap_parse_val(struct regmap *map, const void *buf,
2559 unsigned int *val)
2560{
2561 if (!map->format.parse_val)
2562 return -EINVAL;
2563
2564 *val = map->format.parse_val(buf);
2565
2566 return 0;
2567}
2568EXPORT_SYMBOL_GPL(regmap_parse_val);
2569
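/*
 * Illustrative sketch, not part of regmap.c: walking a raw buffer (for
 * example one filled by regmap_raw_read()) and converting each entry to a
 * CPU-order value with regmap_get_val_bytes() and regmap_parse_val(). The
 * helper name and the pr_info() output are assumptions for this example.
 */
static int example_print_raw_buf(struct regmap *map, const u8 *buf,
				 size_t buf_len)
{
	int val_bytes = regmap_get_val_bytes(map);
	unsigned int val;
	size_t i;
	int ret;

	if (val_bytes <= 0)
		return val_bytes ? val_bytes : -EINVAL;

	for (i = 0; i + val_bytes <= buf_len; i += val_bytes) {
		ret = regmap_parse_val(map, buf + i, &val);
		if (ret != 0)
			return ret;
		pr_info("raw[%zu] = %#x\n", i / val_bytes, val);
	}

	return 0;
}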
31244e39
MB
2570static int __init regmap_initcall(void)
2571{
2572 regmap_debugfs_initcall();
2573
2574 return 0;
2575}
2576postcore_initcall(regmap_initcall);