Commit | Line | Data |
---|---|---|
9fabe24e DP |
1 | /* |
2 | * Register cache access API | |
3 | * | |
4 | * Copyright 2011 Wolfson Microelectronics plc | |
5 | * | |
6 | * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/slab.h> | |
1b6bc32f | 14 | #include <linux/export.h> |
51990e82 | 15 | #include <linux/device.h> |
9fabe24e | 16 | #include <trace/events/regmap.h> |
f094fea6 | 17 | #include <linux/bsearch.h> |
c08604b8 | 18 | #include <linux/sort.h> |
9fabe24e DP |
19 | |
20 | #include "internal.h" | |
21 | ||
22 | static const struct regcache_ops *cache_types[] = { | |
28644c80 | 23 | ®cache_rbtree_ops, |
2cbbb579 | 24 | ®cache_lzo_ops, |
2ac902ce | 25 | ®cache_flat_ops, |
9fabe24e DP |
26 | }; |
27 | ||
28 | static int regcache_hw_init(struct regmap *map) | |
29 | { | |
30 | int i, j; | |
31 | int ret; | |
32 | int count; | |
33 | unsigned int val; | |
34 | void *tmp_buf; | |
35 | ||
36 | if (!map->num_reg_defaults_raw) | |
37 | return -EINVAL; | |
38 | ||
39 | if (!map->reg_defaults_raw) { | |
df00c79f | 40 | u32 cache_bypass = map->cache_bypass; |
9fabe24e | 41 | dev_warn(map->dev, "No cache defaults, reading back from HW\n"); |
df00c79f LD |
42 | |
43 | /* Bypass the cache access till data read from HW*/ | |
44 | map->cache_bypass = 1; | |
9fabe24e DP |
45 | tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); |
46 | if (!tmp_buf) | |
47 | return -EINVAL; | |
eb4cb76f MB |
48 | ret = regmap_raw_read(map, 0, tmp_buf, |
49 | map->num_reg_defaults_raw); | |
df00c79f | 50 | map->cache_bypass = cache_bypass; |
9fabe24e DP |
51 | if (ret < 0) { |
52 | kfree(tmp_buf); | |
53 | return ret; | |
54 | } | |
55 | map->reg_defaults_raw = tmp_buf; | |
56 | map->cache_free = 1; | |
57 | } | |
58 | ||
59 | /* calculate the size of reg_defaults */ | |
60 | for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { | |
879082c9 | 61 | val = regcache_get_val(map, map->reg_defaults_raw, i); |
f01ee60f | 62 | if (regmap_volatile(map, i * map->reg_stride)) |
9fabe24e DP |
63 | continue; |
64 | count++; | |
65 | } | |
66 | ||
67 | map->reg_defaults = kmalloc(count * sizeof(struct reg_default), | |
68 | GFP_KERNEL); | |
021cd616 LPC |
69 | if (!map->reg_defaults) { |
70 | ret = -ENOMEM; | |
71 | goto err_free; | |
72 | } | |
9fabe24e DP |
73 | |
74 | /* fill the reg_defaults */ | |
75 | map->num_reg_defaults = count; | |
76 | for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { | |
879082c9 | 77 | val = regcache_get_val(map, map->reg_defaults_raw, i); |
f01ee60f | 78 | if (regmap_volatile(map, i * map->reg_stride)) |
9fabe24e | 79 | continue; |
f01ee60f | 80 | map->reg_defaults[j].reg = i * map->reg_stride; |
9fabe24e DP |
81 | map->reg_defaults[j].def = val; |
82 | j++; | |
83 | } | |
84 | ||
85 | return 0; | |
021cd616 LPC |
86 | |
87 | err_free: | |
88 | if (map->cache_free) | |
89 | kfree(map->reg_defaults_raw); | |
90 | ||
91 | return ret; | |
9fabe24e DP |
92 | } |
93 | ||
e5e3b8ab | 94 | int regcache_init(struct regmap *map, const struct regmap_config *config) |
9fabe24e DP |
95 | { |
96 | int ret; | |
97 | int i; | |
98 | void *tmp_buf; | |
99 | ||
f01ee60f SW |
100 | for (i = 0; i < config->num_reg_defaults; i++) |
101 | if (config->reg_defaults[i].reg % map->reg_stride) | |
102 | return -EINVAL; | |
103 | ||
e7a6db30 MB |
104 | if (map->cache_type == REGCACHE_NONE) { |
105 | map->cache_bypass = true; | |
9fabe24e | 106 | return 0; |
e7a6db30 | 107 | } |
9fabe24e DP |
108 | |
109 | for (i = 0; i < ARRAY_SIZE(cache_types); i++) | |
110 | if (cache_types[i]->type == map->cache_type) | |
111 | break; | |
112 | ||
113 | if (i == ARRAY_SIZE(cache_types)) { | |
114 | dev_err(map->dev, "Could not match compress type: %d\n", | |
115 | map->cache_type); | |
116 | return -EINVAL; | |
117 | } | |
118 | ||
e5e3b8ab LPC |
119 | map->num_reg_defaults = config->num_reg_defaults; |
120 | map->num_reg_defaults_raw = config->num_reg_defaults_raw; | |
121 | map->reg_defaults_raw = config->reg_defaults_raw; | |
064d4db1 LPC |
122 | map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); |
123 | map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; | |
e5e3b8ab | 124 | |
9fabe24e DP |
125 | map->cache = NULL; |
126 | map->cache_ops = cache_types[i]; | |
127 | ||
128 | if (!map->cache_ops->read || | |
129 | !map->cache_ops->write || | |
130 | !map->cache_ops->name) | |
131 | return -EINVAL; | |
132 | ||
133 | /* We still need to ensure that the reg_defaults | |
134 | * won't vanish from under us. We'll need to make | |
135 | * a copy of it. | |
136 | */ | |
720e4616 | 137 | if (config->reg_defaults) { |
9fabe24e DP |
138 | if (!map->num_reg_defaults) |
139 | return -EINVAL; | |
720e4616 | 140 | tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults * |
9fabe24e DP |
141 | sizeof(struct reg_default), GFP_KERNEL); |
142 | if (!tmp_buf) | |
143 | return -ENOMEM; | |
144 | map->reg_defaults = tmp_buf; | |
8528bdd4 | 145 | } else if (map->num_reg_defaults_raw) { |
5fcd2560 | 146 | /* Some devices such as PMICs don't have cache defaults, |
9fabe24e DP |
147 | * we cope with this by reading back the HW registers and |
148 | * crafting the cache defaults by hand. | |
149 | */ | |
150 | ret = regcache_hw_init(map); | |
151 | if (ret < 0) | |
152 | return ret; | |
153 | } | |
154 | ||
155 | if (!map->max_register) | |
156 | map->max_register = map->num_reg_defaults_raw; | |
157 | ||
158 | if (map->cache_ops->init) { | |
159 | dev_dbg(map->dev, "Initializing %s cache\n", | |
160 | map->cache_ops->name); | |
bd061c78 LPC |
161 | ret = map->cache_ops->init(map); |
162 | if (ret) | |
163 | goto err_free; | |
9fabe24e DP |
164 | } |
165 | return 0; | |
bd061c78 LPC |
166 | |
167 | err_free: | |
168 | kfree(map->reg_defaults); | |
169 | if (map->cache_free) | |
170 | kfree(map->reg_defaults_raw); | |
171 | ||
172 | return ret; | |
9fabe24e DP |
173 | } |
174 | ||
175 | void regcache_exit(struct regmap *map) | |
176 | { | |
177 | if (map->cache_type == REGCACHE_NONE) | |
178 | return; | |
179 | ||
180 | BUG_ON(!map->cache_ops); | |
181 | ||
182 | kfree(map->reg_defaults); | |
183 | if (map->cache_free) | |
184 | kfree(map->reg_defaults_raw); | |
185 | ||
186 | if (map->cache_ops->exit) { | |
187 | dev_dbg(map->dev, "Destroying %s cache\n", | |
188 | map->cache_ops->name); | |
189 | map->cache_ops->exit(map); | |
190 | } | |
191 | } | |
192 | ||
193 | /** | |
194 | * regcache_read: Fetch the value of a given register from the cache. | |
195 | * | |
196 | * @map: map to configure. | |
197 | * @reg: The register index. | |
198 | * @value: The value to be returned. | |
199 | * | |
200 | * Return a negative value on failure, 0 on success. | |
201 | */ | |
202 | int regcache_read(struct regmap *map, | |
203 | unsigned int reg, unsigned int *value) | |
204 | { | |
bc7ee556 MB |
205 | int ret; |
206 | ||
9fabe24e DP |
207 | if (map->cache_type == REGCACHE_NONE) |
208 | return -ENOSYS; | |
209 | ||
210 | BUG_ON(!map->cache_ops); | |
211 | ||
bc7ee556 MB |
212 | if (!regmap_volatile(map, reg)) { |
213 | ret = map->cache_ops->read(map, reg, value); | |
214 | ||
215 | if (ret == 0) | |
216 | trace_regmap_reg_read_cache(map->dev, reg, *value); | |
217 | ||
218 | return ret; | |
219 | } | |
9fabe24e DP |
220 | |
221 | return -EINVAL; | |
222 | } | |
9fabe24e DP |
223 | |
224 | /** | |
225 | * regcache_write: Set the value of a given register in the cache. | |
226 | * | |
227 | * @map: map to configure. | |
228 | * @reg: The register index. | |
229 | * @value: The new register value. | |
230 | * | |
231 | * Return a negative value on failure, 0 on success. | |
232 | */ | |
233 | int regcache_write(struct regmap *map, | |
234 | unsigned int reg, unsigned int value) | |
235 | { | |
236 | if (map->cache_type == REGCACHE_NONE) | |
237 | return 0; | |
238 | ||
239 | BUG_ON(!map->cache_ops); | |
240 | ||
9fabe24e DP |
241 | if (!regmap_volatile(map, reg)) |
242 | return map->cache_ops->write(map, reg, value); | |
243 | ||
244 | return 0; | |
245 | } | |
9fabe24e | 246 | |
d856fce4 MH |
247 | static int regcache_default_sync(struct regmap *map, unsigned int min, |
248 | unsigned int max) | |
249 | { | |
250 | unsigned int reg; | |
251 | ||
75617328 | 252 | for (reg = min; reg <= max; reg += map->reg_stride) { |
d856fce4 MH |
253 | unsigned int val; |
254 | int ret; | |
255 | ||
83f8475c DR |
256 | if (regmap_volatile(map, reg) || |
257 | !regmap_writeable(map, reg)) | |
d856fce4 MH |
258 | continue; |
259 | ||
260 | ret = regcache_read(map, reg, &val); | |
261 | if (ret) | |
262 | return ret; | |
263 | ||
264 | /* Is this the hardware default? If so skip. */ | |
265 | ret = regcache_lookup_reg(map, reg); | |
266 | if (ret >= 0 && val == map->reg_defaults[ret].def) | |
267 | continue; | |
268 | ||
269 | map->cache_bypass = 1; | |
270 | ret = _regmap_write(map, reg, val); | |
271 | map->cache_bypass = 0; | |
272 | if (ret) | |
273 | return ret; | |
274 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); | |
275 | } | |
276 | ||
277 | return 0; | |
278 | } | |
279 | ||
9fabe24e DP |
280 | /** |
281 | * regcache_sync: Sync the register cache with the hardware. | |
282 | * | |
283 | * @map: map to configure. | |
284 | * | |
285 | * Any registers that should not be synced should be marked as | |
286 | * volatile. In general drivers can choose not to use the provided | |
287 | * syncing functionality if they so require. | |
288 | * | |
289 | * Return a negative value on failure, 0 on success. | |
290 | */ | |
291 | int regcache_sync(struct regmap *map) | |
292 | { | |
954757d7 | 293 | int ret = 0; |
954757d7 | 294 | unsigned int i; |
59360089 | 295 | const char *name; |
beb1a10f | 296 | unsigned int bypass; |
59360089 | 297 | |
d856fce4 | 298 | BUG_ON(!map->cache_ops); |
9fabe24e | 299 | |
81485f52 | 300 | map->lock(map->lock_arg); |
beb1a10f DP |
301 | /* Remember the initial bypass state */ |
302 | bypass = map->cache_bypass; | |
954757d7 DP |
303 | dev_dbg(map->dev, "Syncing %s cache\n", |
304 | map->cache_ops->name); | |
305 | name = map->cache_ops->name; | |
306 | trace_regcache_sync(map->dev, name, "start"); | |
22f0d90a | 307 | |
8ae0d7e8 MB |
308 | if (!map->cache_dirty) |
309 | goto out; | |
d9db7627 | 310 | |
affbe886 MB |
311 | map->async = true; |
312 | ||
22f0d90a | 313 | /* Apply any patch first */ |
8a892d69 | 314 | map->cache_bypass = 1; |
22f0d90a MB |
315 | for (i = 0; i < map->patch_regs; i++) { |
316 | ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); | |
317 | if (ret != 0) { | |
318 | dev_err(map->dev, "Failed to write %x = %x: %d\n", | |
319 | map->patch[i].reg, map->patch[i].def, ret); | |
320 | goto out; | |
321 | } | |
322 | } | |
8a892d69 | 323 | map->cache_bypass = 0; |
22f0d90a | 324 | |
d856fce4 MH |
325 | if (map->cache_ops->sync) |
326 | ret = map->cache_ops->sync(map, 0, map->max_register); | |
327 | else | |
328 | ret = regcache_default_sync(map, 0, map->max_register); | |
954757d7 | 329 | |
6ff73738 MB |
330 | if (ret == 0) |
331 | map->cache_dirty = false; | |
954757d7 | 332 | |
954757d7 | 333 | out: |
beb1a10f | 334 | /* Restore the bypass state */ |
affbe886 | 335 | map->async = false; |
beb1a10f | 336 | map->cache_bypass = bypass; |
81485f52 | 337 | map->unlock(map->lock_arg); |
954757d7 | 338 | |
affbe886 MB |
339 | regmap_async_complete(map); |
340 | ||
341 | trace_regcache_sync(map->dev, name, "stop"); | |
342 | ||
954757d7 | 343 | return ret; |
9fabe24e DP |
344 | } |
345 | EXPORT_SYMBOL_GPL(regcache_sync); | |
346 | ||
4d4cfd16 MB |
347 | /** |
348 | * regcache_sync_region: Sync part of the register cache with the hardware. | |
349 | * | |
350 | * @map: map to sync. | |
351 | * @min: first register to sync | |
352 | * @max: last register to sync | |
353 | * | |
354 | * Write all non-default register values in the specified region to | |
355 | * the hardware. | |
356 | * | |
357 | * Return a negative value on failure, 0 on success. | |
358 | */ | |
359 | int regcache_sync_region(struct regmap *map, unsigned int min, | |
360 | unsigned int max) | |
361 | { | |
362 | int ret = 0; | |
363 | const char *name; | |
364 | unsigned int bypass; | |
365 | ||
d856fce4 | 366 | BUG_ON(!map->cache_ops); |
4d4cfd16 | 367 | |
81485f52 | 368 | map->lock(map->lock_arg); |
4d4cfd16 MB |
369 | |
370 | /* Remember the initial bypass state */ | |
371 | bypass = map->cache_bypass; | |
372 | ||
373 | name = map->cache_ops->name; | |
374 | dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); | |
375 | ||
376 | trace_regcache_sync(map->dev, name, "start region"); | |
377 | ||
378 | if (!map->cache_dirty) | |
379 | goto out; | |
380 | ||
affbe886 MB |
381 | map->async = true; |
382 | ||
d856fce4 MH |
383 | if (map->cache_ops->sync) |
384 | ret = map->cache_ops->sync(map, min, max); | |
385 | else | |
386 | ret = regcache_default_sync(map, min, max); | |
4d4cfd16 MB |
387 | |
388 | out: | |
4d4cfd16 MB |
389 | /* Restore the bypass state */ |
390 | map->cache_bypass = bypass; | |
affbe886 | 391 | map->async = false; |
81485f52 | 392 | map->unlock(map->lock_arg); |
4d4cfd16 | 393 | |
affbe886 MB |
394 | regmap_async_complete(map); |
395 | ||
396 | trace_regcache_sync(map->dev, name, "stop region"); | |
397 | ||
4d4cfd16 MB |
398 | return ret; |
399 | } | |
e466de05 | 400 | EXPORT_SYMBOL_GPL(regcache_sync_region); |
4d4cfd16 | 401 | |
697e85bc MB |
402 | /** |
403 | * regcache_drop_region: Discard part of the register cache | |
404 | * | |
405 | * @map: map to operate on | |
406 | * @min: first register to discard | |
407 | * @max: last register to discard | |
408 | * | |
409 | * Discard part of the register cache. | |
410 | * | |
411 | * Return a negative value on failure, 0 on success. | |
412 | */ | |
413 | int regcache_drop_region(struct regmap *map, unsigned int min, | |
414 | unsigned int max) | |
415 | { | |
697e85bc MB |
416 | int ret = 0; |
417 | ||
3f4ff561 | 418 | if (!map->cache_ops || !map->cache_ops->drop) |
697e85bc MB |
419 | return -EINVAL; |
420 | ||
81485f52 | 421 | map->lock(map->lock_arg); |
697e85bc MB |
422 | |
423 | trace_regcache_drop_region(map->dev, min, max); | |
424 | ||
3f4ff561 | 425 | ret = map->cache_ops->drop(map, min, max); |
697e85bc | 426 | |
81485f52 | 427 | map->unlock(map->lock_arg); |
697e85bc MB |
428 | |
429 | return ret; | |
430 | } | |
431 | EXPORT_SYMBOL_GPL(regcache_drop_region); | |
432 | ||
92afb286 MB |
433 | /** |
434 | * regcache_cache_only: Put a register map into cache only mode | |
435 | * | |
436 | * @map: map to configure | |
437 | * @cache_only: flag if changes should be written to the hardware | |
438 | * | |
439 | * When a register map is marked as cache only writes to the register | |
440 | * map API will only update the register cache, they will not cause | |
441 | * any hardware changes. This is useful for allowing portions of | |
442 | * drivers to act as though the device were functioning as normal when | |
443 | * it is disabled for power saving reasons. | |
444 | */ | |
445 | void regcache_cache_only(struct regmap *map, bool enable) | |
446 | { | |
81485f52 | 447 | map->lock(map->lock_arg); |
ac77a765 | 448 | WARN_ON(map->cache_bypass && enable); |
92afb286 | 449 | map->cache_only = enable; |
5d5b7d4f | 450 | trace_regmap_cache_only(map->dev, enable); |
81485f52 | 451 | map->unlock(map->lock_arg); |
92afb286 MB |
452 | } |
453 | EXPORT_SYMBOL_GPL(regcache_cache_only); | |
454 | ||
8ae0d7e8 MB |
455 | /** |
456 | * regcache_mark_dirty: Mark the register cache as dirty | |
457 | * | |
458 | * @map: map to mark | |
459 | * | |
460 | * Mark the register cache as dirty, for example due to the device | |
461 | * having been powered down for suspend. If the cache is not marked | |
462 | * as dirty then the cache sync will be suppressed. | |
463 | */ | |
464 | void regcache_mark_dirty(struct regmap *map) | |
465 | { | |
81485f52 | 466 | map->lock(map->lock_arg); |
8ae0d7e8 | 467 | map->cache_dirty = true; |
81485f52 | 468 | map->unlock(map->lock_arg); |
8ae0d7e8 MB |
469 | } |
470 | EXPORT_SYMBOL_GPL(regcache_mark_dirty); | |
471 | ||
6eb0f5e0 DP |
472 | /** |
473 | * regcache_cache_bypass: Put a register map into cache bypass mode | |
474 | * | |
475 | * @map: map to configure | |
0eef6b04 | 476 | * @cache_bypass: flag if changes should not be written to the hardware |
6eb0f5e0 DP |
477 | * |
478 | * When a register map is marked with the cache bypass option, writes | |
479 | * to the register map API will only update the hardware and not the | |
480 | * the cache directly. This is useful when syncing the cache back to | |
481 | * the hardware. | |
482 | */ | |
483 | void regcache_cache_bypass(struct regmap *map, bool enable) | |
484 | { | |
81485f52 | 485 | map->lock(map->lock_arg); |
ac77a765 | 486 | WARN_ON(map->cache_only && enable); |
6eb0f5e0 | 487 | map->cache_bypass = enable; |
5d5b7d4f | 488 | trace_regmap_cache_bypass(map->dev, enable); |
81485f52 | 489 | map->unlock(map->lock_arg); |
6eb0f5e0 DP |
490 | } |
491 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); | |
492 | ||
879082c9 MB |
493 | bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, |
494 | unsigned int val) | |
9fabe24e | 495 | { |
325acab4 MB |
496 | if (regcache_get_val(map, base, idx) == val) |
497 | return true; | |
498 | ||
eb4cb76f MB |
499 | /* Use device native format if possible */ |
500 | if (map->format.format_val) { | |
501 | map->format.format_val(base + (map->cache_word_size * idx), | |
502 | val, 0); | |
503 | return false; | |
504 | } | |
505 | ||
879082c9 | 506 | switch (map->cache_word_size) { |
9fabe24e DP |
507 | case 1: { |
508 | u8 *cache = base; | |
9fabe24e DP |
509 | cache[idx] = val; |
510 | break; | |
511 | } | |
512 | case 2: { | |
513 | u16 *cache = base; | |
9fabe24e DP |
514 | cache[idx] = val; |
515 | break; | |
516 | } | |
7d5e525b MB |
517 | case 4: { |
518 | u32 *cache = base; | |
7d5e525b MB |
519 | cache[idx] = val; |
520 | break; | |
521 | } | |
9fabe24e DP |
522 | default: |
523 | BUG(); | |
524 | } | |
9fabe24e DP |
525 | return false; |
526 | } | |
527 | ||
879082c9 MB |
528 | unsigned int regcache_get_val(struct regmap *map, const void *base, |
529 | unsigned int idx) | |
9fabe24e DP |
530 | { |
531 | if (!base) | |
532 | return -EINVAL; | |
533 | ||
eb4cb76f MB |
534 | /* Use device native format if possible */ |
535 | if (map->format.parse_val) | |
8817796b MB |
536 | return map->format.parse_val(regcache_get_val_addr(map, base, |
537 | idx)); | |
eb4cb76f | 538 | |
879082c9 | 539 | switch (map->cache_word_size) { |
9fabe24e DP |
540 | case 1: { |
541 | const u8 *cache = base; | |
542 | return cache[idx]; | |
543 | } | |
544 | case 2: { | |
545 | const u16 *cache = base; | |
546 | return cache[idx]; | |
547 | } | |
7d5e525b MB |
548 | case 4: { |
549 | const u32 *cache = base; | |
550 | return cache[idx]; | |
551 | } | |
9fabe24e DP |
552 | default: |
553 | BUG(); | |
554 | } | |
555 | /* unreachable */ | |
556 | return -1; | |
557 | } | |
558 | ||
f094fea6 | 559 | static int regcache_default_cmp(const void *a, const void *b) |
c08604b8 DP |
560 | { |
561 | const struct reg_default *_a = a; | |
562 | const struct reg_default *_b = b; | |
563 | ||
564 | return _a->reg - _b->reg; | |
565 | } | |
566 | ||
f094fea6 MB |
567 | int regcache_lookup_reg(struct regmap *map, unsigned int reg) |
568 | { | |
569 | struct reg_default key; | |
570 | struct reg_default *r; | |
571 | ||
572 | key.reg = reg; | |
573 | key.def = 0; | |
574 | ||
575 | r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, | |
576 | sizeof(struct reg_default), regcache_default_cmp); | |
577 | ||
578 | if (r) | |
579 | return r - map->reg_defaults; | |
580 | else | |
6e6ace00 | 581 | return -ENOENT; |
f094fea6 | 582 | } |
f8bd822c | 583 | |
3f4ff561 LPC |
584 | static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx) |
585 | { | |
586 | if (!cache_present) | |
587 | return true; | |
588 | ||
589 | return test_bit(idx, cache_present); | |
590 | } | |
591 | ||
cfdeb8c3 | 592 | static int regcache_sync_block_single(struct regmap *map, void *block, |
3f4ff561 | 593 | unsigned long *cache_present, |
cfdeb8c3 MB |
594 | unsigned int block_base, |
595 | unsigned int start, unsigned int end) | |
596 | { | |
597 | unsigned int i, regtmp, val; | |
598 | int ret; | |
599 | ||
600 | for (i = start; i < end; i++) { | |
601 | regtmp = block_base + (i * map->reg_stride); | |
602 | ||
3f4ff561 | 603 | if (!regcache_reg_present(cache_present, i)) |
cfdeb8c3 MB |
604 | continue; |
605 | ||
606 | val = regcache_get_val(map, block, i); | |
607 | ||
608 | /* Is this the hardware default? If so skip. */ | |
609 | ret = regcache_lookup_reg(map, regtmp); | |
610 | if (ret >= 0 && val == map->reg_defaults[ret].def) | |
611 | continue; | |
612 | ||
613 | map->cache_bypass = 1; | |
614 | ||
615 | ret = _regmap_write(map, regtmp, val); | |
616 | ||
617 | map->cache_bypass = 0; | |
618 | if (ret != 0) | |
619 | return ret; | |
620 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", | |
621 | regtmp, val); | |
622 | } | |
623 | ||
624 | return 0; | |
625 | } | |
626 | ||
75a5f89f MB |
627 | static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, |
628 | unsigned int base, unsigned int cur) | |
629 | { | |
630 | size_t val_bytes = map->format.val_bytes; | |
631 | int ret, count; | |
632 | ||
633 | if (*data == NULL) | |
634 | return 0; | |
635 | ||
78ba73ee | 636 | count = (cur - base) / map->reg_stride; |
75a5f89f | 637 | |
9659293c | 638 | dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", |
78ba73ee | 639 | count * val_bytes, count, base, cur - map->reg_stride); |
75a5f89f MB |
640 | |
641 | map->cache_bypass = 1; | |
642 | ||
0a819809 | 643 | ret = _regmap_raw_write(map, base, *data, count * val_bytes); |
75a5f89f MB |
644 | |
645 | map->cache_bypass = 0; | |
646 | ||
647 | *data = NULL; | |
648 | ||
649 | return ret; | |
650 | } | |
651 | ||
f52687af | 652 | static int regcache_sync_block_raw(struct regmap *map, void *block, |
3f4ff561 | 653 | unsigned long *cache_present, |
cfdeb8c3 MB |
654 | unsigned int block_base, unsigned int start, |
655 | unsigned int end) | |
f8bd822c | 656 | { |
75a5f89f MB |
657 | unsigned int i, val; |
658 | unsigned int regtmp = 0; | |
659 | unsigned int base = 0; | |
660 | const void *data = NULL; | |
f8bd822c MB |
661 | int ret; |
662 | ||
663 | for (i = start; i < end; i++) { | |
664 | regtmp = block_base + (i * map->reg_stride); | |
665 | ||
3f4ff561 | 666 | if (!regcache_reg_present(cache_present, i)) { |
75a5f89f MB |
667 | ret = regcache_sync_block_raw_flush(map, &data, |
668 | base, regtmp); | |
669 | if (ret != 0) | |
670 | return ret; | |
f8bd822c | 671 | continue; |
75a5f89f | 672 | } |
f8bd822c MB |
673 | |
674 | val = regcache_get_val(map, block, i); | |
675 | ||
676 | /* Is this the hardware default? If so skip. */ | |
677 | ret = regcache_lookup_reg(map, regtmp); | |
75a5f89f MB |
678 | if (ret >= 0 && val == map->reg_defaults[ret].def) { |
679 | ret = regcache_sync_block_raw_flush(map, &data, | |
680 | base, regtmp); | |
681 | if (ret != 0) | |
682 | return ret; | |
f8bd822c | 683 | continue; |
75a5f89f | 684 | } |
f8bd822c | 685 | |
75a5f89f MB |
686 | if (!data) { |
687 | data = regcache_get_val_addr(map, block, i); | |
688 | base = regtmp; | |
689 | } | |
f8bd822c MB |
690 | } |
691 | ||
2d49b598 LPC |
692 | return regcache_sync_block_raw_flush(map, &data, base, regtmp + |
693 | map->reg_stride); | |
f8bd822c | 694 | } |
cfdeb8c3 MB |
695 | |
/*
 * Sync entries [start, end) of one cache block, choosing raw block
 * writes when the map supports them and per-register writes otherwise.
 */
int regcache_sync_block(struct regmap *map, void *block,
			unsigned long *cache_present,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map))
		return regcache_sync_block_raw(map, block, cache_present,
					       block_base, start, end);

	return regcache_sync_block_single(map, block, cache_present,
					  block_base, start, end);
}