regmap: Add missing little endian functions
[cascardo/linux.git] / drivers / base / regmap / regmap.c
1 /*
2  * Register map access API
3  *
4  * Copyright 2011 Wolfson Microelectronics plc
5  *
6  * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/device.h>
14 #include <linux/slab.h>
15 #include <linux/export.h>
16 #include <linux/mutex.h>
17 #include <linux/err.h>
18 #include <linux/of.h>
19 #include <linux/rbtree.h>
20 #include <linux/sched.h>
21 #include <linux/delay.h>
22 #include <linux/log2.h>
23
24 #define CREATE_TRACE_POINTS
25 #include "trace.h"
26
27 #include "internal.h"
28
29 /*
30  * Sometimes for failures during very early init the trace
31  * infrastructure isn't available early enough to be used.  For this
32  * sort of problem defining LOG_DEVICE will add printks for basic
33  * register I/O on a specific device.
34  */
35 #undef LOG_DEVICE
36
37 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
38                                unsigned int mask, unsigned int val,
39                                bool *change, bool force_write);
40
41 static int _regmap_bus_reg_read(void *context, unsigned int reg,
42                                 unsigned int *val);
43 static int _regmap_bus_read(void *context, unsigned int reg,
44                             unsigned int *val);
45 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
46                                        unsigned int val);
47 static int _regmap_bus_reg_write(void *context, unsigned int reg,
48                                  unsigned int val);
49 static int _regmap_bus_raw_write(void *context, unsigned int reg,
50                                  unsigned int val);
51
52 bool regmap_reg_in_ranges(unsigned int reg,
53                           const struct regmap_range *ranges,
54                           unsigned int nranges)
55 {
56         const struct regmap_range *r;
57         int i;
58
59         for (i = 0, r = ranges; i < nranges; i++, r++)
60                 if (regmap_reg_in_range(reg, r))
61                         return true;
62         return false;
63 }
64 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
65
66 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
67                               const struct regmap_access_table *table)
68 {
69         /* Check "no ranges" first */
70         if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
71                 return false;
72
73         /* In case zero "yes ranges" are supplied, any reg is OK */
74         if (!table->n_yes_ranges)
75                 return true;
76
77         return regmap_reg_in_ranges(reg, table->yes_ranges,
78                                     table->n_yes_ranges);
79 }
80 EXPORT_SYMBOL_GPL(regmap_check_range_table);
81
82 bool regmap_writeable(struct regmap *map, unsigned int reg)
83 {
84         if (map->max_register && reg > map->max_register)
85                 return false;
86
87         if (map->writeable_reg)
88                 return map->writeable_reg(map->dev, reg);
89
90         if (map->wr_table)
91                 return regmap_check_range_table(map, reg, map->wr_table);
92
93         return true;
94 }
95
96 bool regmap_readable(struct regmap *map, unsigned int reg)
97 {
98         if (!map->reg_read)
99                 return false;
100
101         if (map->max_register && reg > map->max_register)
102                 return false;
103
104         if (map->format.format_write)
105                 return false;
106
107         if (map->readable_reg)
108                 return map->readable_reg(map->dev, reg);
109
110         if (map->rd_table)
111                 return regmap_check_range_table(map, reg, map->rd_table);
112
113         return true;
114 }
115
116 bool regmap_volatile(struct regmap *map, unsigned int reg)
117 {
118         if (!map->format.format_write && !regmap_readable(map, reg))
119                 return false;
120
121         if (map->volatile_reg)
122                 return map->volatile_reg(map->dev, reg);
123
124         if (map->volatile_table)
125                 return regmap_check_range_table(map, reg, map->volatile_table);
126
127         if (map->cache_ops)
128                 return false;
129         else
130                 return true;
131 }
132
133 bool regmap_precious(struct regmap *map, unsigned int reg)
134 {
135         if (!regmap_readable(map, reg))
136                 return false;
137
138         if (map->precious_reg)
139                 return map->precious_reg(map->dev, reg);
140
141         if (map->precious_table)
142                 return regmap_check_range_table(map, reg, map->precious_table);
143
144         return false;
145 }
146
147 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
148         size_t num)
149 {
150         unsigned int i;
151
152         for (i = 0; i < num; i++)
153                 if (!regmap_volatile(map, reg + i))
154                         return false;
155
156         return true;
157 }
158
/* Pack a 2-bit register and 6-bit value into a single byte of work_buf. */
static void regmap_format_2_6_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

/* Pack a 4-bit register and 12-bit value into one big-endian 16-bit word. */
static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

/* Pack a 7-bit register and 9-bit value into one big-endian 16-bit word. */
static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

/*
 * Pack a 10-bit register and 14-bit value into three bytes, big-endian:
 * out[0] gets the top 8 register bits, out[1] mixes the low 2 register
 * bits with the value's bits 13:8, out[2] gets the value's low byte.
 */
static void regmap_format_10_14_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}
190
/* Store an 8-bit value in @buf, pre-shifted left by @shift. */
static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

/* Store a 16-bit value in @buf as big endian, pre-shifted by @shift. */
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

/* Store a 16-bit value in @buf as little endian, pre-shifted by @shift. */
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
	__le16 *b = buf;

	b[0] = cpu_to_le16(val << shift);
}

/* Store a 16-bit value in @buf in CPU-native byte order. */
static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

/* Store a 24-bit value in @buf as three big-endian bytes. */
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

/* Store a 32-bit value in @buf as big endian, pre-shifted by @shift. */
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

/* Store a 32-bit value in @buf as little endian, pre-shifted by @shift. */
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
	__le32 *b = buf;

	b[0] = cpu_to_le32(val << shift);
}

/* Store a 32-bit value in @buf in CPU-native byte order. */
static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}
248
#ifdef CONFIG_64BIT
/* Store a 64-bit value in @buf as big endian; widen before shifting. */
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
	__be64 *b = buf;

	b[0] = cpu_to_be64((u64)val << shift);
}

/* Store a 64-bit value in @buf as little endian; widen before shifting. */
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
	__le64 *b = buf;

	b[0] = cpu_to_le64((u64)val << shift);
}

/* Store a 64-bit value in @buf in CPU-native byte order. */
static void regmap_format_64_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u64 *)buf = (u64)val << shift;
}
#endif
270
/* In-place parse that leaves the buffer untouched (native byte order). */
static void regmap_parse_inplace_noop(void *buf)
{
}

/* Read back an 8-bit value from @buf. */
static unsigned int regmap_parse_8(const void *buf)
{
	const u8 *b = buf;

	return b[0];
}

/* Read back a big-endian 16-bit value from @buf. */
static unsigned int regmap_parse_16_be(const void *buf)
{
	const __be16 *b = buf;

	return be16_to_cpu(b[0]);
}

/* Read back a little-endian 16-bit value from @buf. */
static unsigned int regmap_parse_16_le(const void *buf)
{
	const __le16 *b = buf;

	return le16_to_cpu(b[0]);
}

/* Convert a big-endian 16-bit value to CPU order in place. */
static void regmap_parse_16_be_inplace(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);
}

/* Convert a little-endian 16-bit value to CPU order in place. */
static void regmap_parse_16_le_inplace(void *buf)
{
	__le16 *b = buf;

	b[0] = le16_to_cpu(b[0]);
}

/* Read back a native-order 16-bit value from @buf. */
static unsigned int regmap_parse_16_native(const void *buf)
{
	return *(u16 *)buf;
}
314
/* Assemble a 24-bit value from three big-endian bytes in @buf. */
static unsigned int regmap_parse_24(const void *buf)
{
	const u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

/* Read back a big-endian 32-bit value from @buf. */
static unsigned int regmap_parse_32_be(const void *buf)
{
	const __be32 *b = buf;

	return be32_to_cpu(b[0]);
}

/* Read back a little-endian 32-bit value from @buf. */
static unsigned int regmap_parse_32_le(const void *buf)
{
	const __le32 *b = buf;

	return le32_to_cpu(b[0]);
}

/* Convert a big-endian 32-bit value to CPU order in place. */
static void regmap_parse_32_be_inplace(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);
}

/* Convert a little-endian 32-bit value to CPU order in place. */
static void regmap_parse_32_le_inplace(void *buf)
{
	__le32 *b = buf;

	b[0] = le32_to_cpu(b[0]);
}

/* Read back a native-order 32-bit value from @buf. */
static unsigned int regmap_parse_32_native(const void *buf)
{
	return *(u32 *)buf;
}
357
#ifdef CONFIG_64BIT
/*
 * Read back a big-endian 64-bit value from @buf.
 * NOTE(review): the return type is unsigned int, so the upper 32 bits
 * of the parsed value are truncated on return — confirm callers only
 * use <= 32 significant bits.
 */
static unsigned int regmap_parse_64_be(const void *buf)
{
	const __be64 *b = buf;

	return be64_to_cpu(b[0]);
}

/*
 * Read back a little-endian 64-bit value from @buf.
 * NOTE(review): same unsigned int truncation caveat as the _be variant.
 */
static unsigned int regmap_parse_64_le(const void *buf)
{
	const __le64 *b = buf;

	return le64_to_cpu(b[0]);
}

/* Convert a big-endian 64-bit value to CPU order in place. */
static void regmap_parse_64_be_inplace(void *buf)
{
	__be64 *b = buf;

	b[0] = be64_to_cpu(b[0]);
}

/* Convert a little-endian 64-bit value to CPU order in place. */
static void regmap_parse_64_le_inplace(void *buf)
{
	__le64 *b = buf;

	b[0] = le64_to_cpu(b[0]);
}

/*
 * Read back a native-order 64-bit value from @buf.
 * NOTE(review): truncated to unsigned int on return, as above.
 */
static unsigned int regmap_parse_64_native(const void *buf)
{
	return *(u64 *)buf;
}
#endif
392
/* Default lock callback when the map is protected by a mutex. */
static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

/* Counterpart to regmap_lock_mutex(). */
static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}
404
/*
 * Lock callback using an IRQ-saving spinlock (used for fast_io maps,
 * see __regmap_init).  The saved IRQ flags are stashed in the map so
 * the unlock callback can restore them.
 */
static void regmap_lock_spinlock(void *__map)
__acquires(&map->spinlock)
{
	struct regmap *map = __map;
	unsigned long flags;

	spin_lock_irqsave(&map->spinlock, flags);
	map->spinlock_flags = flags;
}

/* Counterpart to regmap_lock_spinlock(); restores the saved IRQ flags. */
static void regmap_unlock_spinlock(void *__map)
__releases(&map->spinlock)
{
	struct regmap *map = __map;
	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
}
421
/* devres release callback for the dev_get_regmap() lookup entry. */
static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}
430
431 static bool _regmap_range_add(struct regmap *map,
432                               struct regmap_range_node *data)
433 {
434         struct rb_root *root = &map->range_tree;
435         struct rb_node **new = &(root->rb_node), *parent = NULL;
436
437         while (*new) {
438                 struct regmap_range_node *this =
439                         container_of(*new, struct regmap_range_node, node);
440
441                 parent = *new;
442                 if (data->range_max < this->range_min)
443                         new = &((*new)->rb_left);
444                 else if (data->range_min > this->range_max)
445                         new = &((*new)->rb_right);
446                 else
447                         return false;
448         }
449
450         rb_link_node(&data->node, parent, new);
451         rb_insert_color(&data->node, root);
452
453         return true;
454 }
455
456 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
457                                                       unsigned int reg)
458 {
459         struct rb_node *node = map->range_tree.rb_node;
460
461         while (node) {
462                 struct regmap_range_node *this =
463                         container_of(node, struct regmap_range_node, node);
464
465                 if (reg < this->range_min)
466                         node = node->rb_left;
467                 else if (reg > this->range_max)
468                         node = node->rb_right;
469                 else
470                         return this;
471         }
472
473         return NULL;
474 }
475
476 static void regmap_range_exit(struct regmap *map)
477 {
478         struct rb_node *next;
479         struct regmap_range_node *range_node;
480
481         next = rb_first(&map->range_tree);
482         while (next) {
483                 range_node = rb_entry(next, struct regmap_range_node, node);
484                 next = rb_next(&range_node->node);
485                 rb_erase(&range_node->node, &map->range_tree);
486                 kfree(range_node);
487         }
488
489         kfree(map->selector_work_buf);
490 }
491
492 int regmap_attach_dev(struct device *dev, struct regmap *map,
493                       const struct regmap_config *config)
494 {
495         struct regmap **m;
496
497         map->dev = dev;
498
499         regmap_debugfs_init(map, config->name);
500
501         /* Add a devres resource for dev_get_regmap() */
502         m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
503         if (!m) {
504                 regmap_debugfs_exit(map);
505                 return -ENOMEM;
506         }
507         *m = map;
508         devres_add(dev, m);
509
510         return 0;
511 }
512 EXPORT_SYMBOL_GPL(regmap_attach_dev);
513
514 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
515                                         const struct regmap_config *config)
516 {
517         enum regmap_endian endian;
518
519         /* Retrieve the endianness specification from the regmap config */
520         endian = config->reg_format_endian;
521
522         /* If the regmap config specified a non-default value, use that */
523         if (endian != REGMAP_ENDIAN_DEFAULT)
524                 return endian;
525
526         /* Retrieve the endianness specification from the bus config */
527         if (bus && bus->reg_format_endian_default)
528                 endian = bus->reg_format_endian_default;
529
530         /* If the bus specified a non-default value, use that */
531         if (endian != REGMAP_ENDIAN_DEFAULT)
532                 return endian;
533
534         /* Use this if no other value was found */
535         return REGMAP_ENDIAN_BIG;
536 }
537
538 enum regmap_endian regmap_get_val_endian(struct device *dev,
539                                          const struct regmap_bus *bus,
540                                          const struct regmap_config *config)
541 {
542         struct device_node *np;
543         enum regmap_endian endian;
544
545         /* Retrieve the endianness specification from the regmap config */
546         endian = config->val_format_endian;
547
548         /* If the regmap config specified a non-default value, use that */
549         if (endian != REGMAP_ENDIAN_DEFAULT)
550                 return endian;
551
552         /* If the dev and dev->of_node exist try to get endianness from DT */
553         if (dev && dev->of_node) {
554                 np = dev->of_node;
555
556                 /* Parse the device's DT node for an endianness specification */
557                 if (of_property_read_bool(np, "big-endian"))
558                         endian = REGMAP_ENDIAN_BIG;
559                 else if (of_property_read_bool(np, "little-endian"))
560                         endian = REGMAP_ENDIAN_LITTLE;
561                 else if (of_property_read_bool(np, "native-endian"))
562                         endian = REGMAP_ENDIAN_NATIVE;
563
564                 /* If the endianness was specified in DT, use that */
565                 if (endian != REGMAP_ENDIAN_DEFAULT)
566                         return endian;
567         }
568
569         /* Retrieve the endianness specification from the bus config */
570         if (bus && bus->val_format_endian_default)
571                 endian = bus->val_format_endian_default;
572
573         /* If the bus specified a non-default value, use that */
574         if (endian != REGMAP_ENDIAN_DEFAULT)
575                 return endian;
576
577         /* Use this if no other value was found */
578         return REGMAP_ENDIAN_BIG;
579 }
580 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
581
582 struct regmap *__regmap_init(struct device *dev,
583                              const struct regmap_bus *bus,
584                              void *bus_context,
585                              const struct regmap_config *config,
586                              struct lock_class_key *lock_key,
587                              const char *lock_name)
588 {
589         struct regmap *map;
590         int ret = -EINVAL;
591         enum regmap_endian reg_endian, val_endian;
592         int i, j;
593
594         if (!config)
595                 goto err;
596
597         map = kzalloc(sizeof(*map), GFP_KERNEL);
598         if (map == NULL) {
599                 ret = -ENOMEM;
600                 goto err;
601         }
602
603         if (config->lock && config->unlock) {
604                 map->lock = config->lock;
605                 map->unlock = config->unlock;
606                 map->lock_arg = config->lock_arg;
607         } else {
608                 if ((bus && bus->fast_io) ||
609                     config->fast_io) {
610                         spin_lock_init(&map->spinlock);
611                         map->lock = regmap_lock_spinlock;
612                         map->unlock = regmap_unlock_spinlock;
613                         lockdep_set_class_and_name(&map->spinlock,
614                                                    lock_key, lock_name);
615                 } else {
616                         mutex_init(&map->mutex);
617                         map->lock = regmap_lock_mutex;
618                         map->unlock = regmap_unlock_mutex;
619                         lockdep_set_class_and_name(&map->mutex,
620                                                    lock_key, lock_name);
621                 }
622                 map->lock_arg = map;
623         }
624
625         /*
626          * When we write in fast-paths with regmap_bulk_write() don't allocate
627          * scratch buffers with sleeping allocations.
628          */
629         if ((bus && bus->fast_io) || config->fast_io)
630                 map->alloc_flags = GFP_ATOMIC;
631         else
632                 map->alloc_flags = GFP_KERNEL;
633
634         map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
635         map->format.pad_bytes = config->pad_bits / 8;
636         map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
637         map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
638                         config->val_bits + config->pad_bits, 8);
639         map->reg_shift = config->pad_bits % 8;
640         if (config->reg_stride)
641                 map->reg_stride = config->reg_stride;
642         else
643                 map->reg_stride = 1;
644         if (is_power_of_2(map->reg_stride))
645                 map->reg_stride_order = ilog2(map->reg_stride);
646         else
647                 map->reg_stride_order = -1;
648         map->use_single_read = config->use_single_rw || !bus || !bus->read;
649         map->use_single_write = config->use_single_rw || !bus || !bus->write;
650         map->can_multi_write = config->can_multi_write && bus && bus->write;
651         if (bus) {
652                 map->max_raw_read = bus->max_raw_read;
653                 map->max_raw_write = bus->max_raw_write;
654         }
655         map->dev = dev;
656         map->bus = bus;
657         map->bus_context = bus_context;
658         map->max_register = config->max_register;
659         map->wr_table = config->wr_table;
660         map->rd_table = config->rd_table;
661         map->volatile_table = config->volatile_table;
662         map->precious_table = config->precious_table;
663         map->writeable_reg = config->writeable_reg;
664         map->readable_reg = config->readable_reg;
665         map->volatile_reg = config->volatile_reg;
666         map->precious_reg = config->precious_reg;
667         map->cache_type = config->cache_type;
668         map->name = config->name;
669
670         spin_lock_init(&map->async_lock);
671         INIT_LIST_HEAD(&map->async_list);
672         INIT_LIST_HEAD(&map->async_free);
673         init_waitqueue_head(&map->async_waitq);
674
675         if (config->read_flag_mask || config->write_flag_mask) {
676                 map->read_flag_mask = config->read_flag_mask;
677                 map->write_flag_mask = config->write_flag_mask;
678         } else if (bus) {
679                 map->read_flag_mask = bus->read_flag_mask;
680         }
681
682         if (!bus) {
683                 map->reg_read  = config->reg_read;
684                 map->reg_write = config->reg_write;
685
686                 map->defer_caching = false;
687                 goto skip_format_initialization;
688         } else if (!bus->read || !bus->write) {
689                 map->reg_read = _regmap_bus_reg_read;
690                 map->reg_write = _regmap_bus_reg_write;
691
692                 map->defer_caching = false;
693                 goto skip_format_initialization;
694         } else {
695                 map->reg_read  = _regmap_bus_read;
696                 map->reg_update_bits = bus->reg_update_bits;
697         }
698
699         reg_endian = regmap_get_reg_endian(bus, config);
700         val_endian = regmap_get_val_endian(dev, bus, config);
701
702         switch (config->reg_bits + map->reg_shift) {
703         case 2:
704                 switch (config->val_bits) {
705                 case 6:
706                         map->format.format_write = regmap_format_2_6_write;
707                         break;
708                 default:
709                         goto err_map;
710                 }
711                 break;
712
713         case 4:
714                 switch (config->val_bits) {
715                 case 12:
716                         map->format.format_write = regmap_format_4_12_write;
717                         break;
718                 default:
719                         goto err_map;
720                 }
721                 break;
722
723         case 7:
724                 switch (config->val_bits) {
725                 case 9:
726                         map->format.format_write = regmap_format_7_9_write;
727                         break;
728                 default:
729                         goto err_map;
730                 }
731                 break;
732
733         case 10:
734                 switch (config->val_bits) {
735                 case 14:
736                         map->format.format_write = regmap_format_10_14_write;
737                         break;
738                 default:
739                         goto err_map;
740                 }
741                 break;
742
743         case 8:
744                 map->format.format_reg = regmap_format_8;
745                 break;
746
747         case 16:
748                 switch (reg_endian) {
749                 case REGMAP_ENDIAN_BIG:
750                         map->format.format_reg = regmap_format_16_be;
751                         break;
752                 case REGMAP_ENDIAN_LITTLE:
753                         map->format.format_reg = regmap_format_16_le;
754                         break;
755                 case REGMAP_ENDIAN_NATIVE:
756                         map->format.format_reg = regmap_format_16_native;
757                         break;
758                 default:
759                         goto err_map;
760                 }
761                 break;
762
763         case 24:
764                 if (reg_endian != REGMAP_ENDIAN_BIG)
765                         goto err_map;
766                 map->format.format_reg = regmap_format_24;
767                 break;
768
769         case 32:
770                 switch (reg_endian) {
771                 case REGMAP_ENDIAN_BIG:
772                         map->format.format_reg = regmap_format_32_be;
773                         break;
774                 case REGMAP_ENDIAN_LITTLE:
775                         map->format.format_reg = regmap_format_32_le;
776                         break;
777                 case REGMAP_ENDIAN_NATIVE:
778                         map->format.format_reg = regmap_format_32_native;
779                         break;
780                 default:
781                         goto err_map;
782                 }
783                 break;
784
785 #ifdef CONFIG_64BIT
786         case 64:
787                 switch (reg_endian) {
788                 case REGMAP_ENDIAN_BIG:
789                         map->format.format_reg = regmap_format_64_be;
790                         break;
791                 case REGMAP_ENDIAN_LITTLE:
792                         map->format.format_reg = regmap_format_64_le;
793                         break;
794                 case REGMAP_ENDIAN_NATIVE:
795                         map->format.format_reg = regmap_format_64_native;
796                         break;
797                 default:
798                         goto err_map;
799                 }
800                 break;
801 #endif
802
803         default:
804                 goto err_map;
805         }
806
807         if (val_endian == REGMAP_ENDIAN_NATIVE)
808                 map->format.parse_inplace = regmap_parse_inplace_noop;
809
810         switch (config->val_bits) {
811         case 8:
812                 map->format.format_val = regmap_format_8;
813                 map->format.parse_val = regmap_parse_8;
814                 map->format.parse_inplace = regmap_parse_inplace_noop;
815                 break;
816         case 16:
817                 switch (val_endian) {
818                 case REGMAP_ENDIAN_BIG:
819                         map->format.format_val = regmap_format_16_be;
820                         map->format.parse_val = regmap_parse_16_be;
821                         map->format.parse_inplace = regmap_parse_16_be_inplace;
822                         break;
823                 case REGMAP_ENDIAN_LITTLE:
824                         map->format.format_val = regmap_format_16_le;
825                         map->format.parse_val = regmap_parse_16_le;
826                         map->format.parse_inplace = regmap_parse_16_le_inplace;
827                         break;
828                 case REGMAP_ENDIAN_NATIVE:
829                         map->format.format_val = regmap_format_16_native;
830                         map->format.parse_val = regmap_parse_16_native;
831                         break;
832                 default:
833                         goto err_map;
834                 }
835                 break;
836         case 24:
837                 if (val_endian != REGMAP_ENDIAN_BIG)
838                         goto err_map;
839                 map->format.format_val = regmap_format_24;
840                 map->format.parse_val = regmap_parse_24;
841                 break;
842         case 32:
843                 switch (val_endian) {
844                 case REGMAP_ENDIAN_BIG:
845                         map->format.format_val = regmap_format_32_be;
846                         map->format.parse_val = regmap_parse_32_be;
847                         map->format.parse_inplace = regmap_parse_32_be_inplace;
848                         break;
849                 case REGMAP_ENDIAN_LITTLE:
850                         map->format.format_val = regmap_format_32_le;
851                         map->format.parse_val = regmap_parse_32_le;
852                         map->format.parse_inplace = regmap_parse_32_le_inplace;
853                         break;
854                 case REGMAP_ENDIAN_NATIVE:
855                         map->format.format_val = regmap_format_32_native;
856                         map->format.parse_val = regmap_parse_32_native;
857                         break;
858                 default:
859                         goto err_map;
860                 }
861                 break;
862 #ifdef CONFIG_64BIT
863         case 64:
864                 switch (val_endian) {
865                 case REGMAP_ENDIAN_BIG:
866                         map->format.format_val = regmap_format_64_be;
867                         map->format.parse_val = regmap_parse_64_be;
868                         map->format.parse_inplace = regmap_parse_64_be_inplace;
869                         break;
870                 case REGMAP_ENDIAN_LITTLE:
871                         map->format.format_val = regmap_format_64_le;
872                         map->format.parse_val = regmap_parse_64_le;
873                         map->format.parse_inplace = regmap_parse_64_le_inplace;
874                         break;
875                 case REGMAP_ENDIAN_NATIVE:
876                         map->format.format_val = regmap_format_64_native;
877                         map->format.parse_val = regmap_parse_64_native;
878                         break;
879                 default:
880                         goto err_map;
881                 }
882                 break;
883 #endif
884         }
885
886         if (map->format.format_write) {
887                 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
888                     (val_endian != REGMAP_ENDIAN_BIG))
889                         goto err_map;
890                 map->use_single_write = true;
891         }
892
893         if (!map->format.format_write &&
894             !(map->format.format_reg && map->format.format_val))
895                 goto err_map;
896
897         map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
898         if (map->work_buf == NULL) {
899                 ret = -ENOMEM;
900                 goto err_map;
901         }
902
903         if (map->format.format_write) {
904                 map->defer_caching = false;
905                 map->reg_write = _regmap_bus_formatted_write;
906         } else if (map->format.format_val) {
907                 map->defer_caching = true;
908                 map->reg_write = _regmap_bus_raw_write;
909         }
910
911 skip_format_initialization:
912
913         map->range_tree = RB_ROOT;
914         for (i = 0; i < config->num_ranges; i++) {
915                 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
916                 struct regmap_range_node *new;
917
918                 /* Sanity check */
919                 if (range_cfg->range_max < range_cfg->range_min) {
920                         dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
921                                 range_cfg->range_max, range_cfg->range_min);
922                         goto err_range;
923                 }
924
925                 if (range_cfg->range_max > map->max_register) {
926                         dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
927                                 range_cfg->range_max, map->max_register);
928                         goto err_range;
929                 }
930
931                 if (range_cfg->selector_reg > map->max_register) {
932                         dev_err(map->dev,
933                                 "Invalid range %d: selector out of map\n", i);
934                         goto err_range;
935                 }
936
937                 if (range_cfg->window_len == 0) {
938                         dev_err(map->dev, "Invalid range %d: window_len 0\n",
939                                 i);
940                         goto err_range;
941                 }
942
943                 /* Make sure, that this register range has no selector
944                    or data window within its boundary */
945                 for (j = 0; j < config->num_ranges; j++) {
946                         unsigned sel_reg = config->ranges[j].selector_reg;
947                         unsigned win_min = config->ranges[j].window_start;
948                         unsigned win_max = win_min +
949                                            config->ranges[j].window_len - 1;
950
951                         /* Allow data window inside its own virtual range */
952                         if (j == i)
953                                 continue;
954
955                         if (range_cfg->range_min <= sel_reg &&
956                             sel_reg <= range_cfg->range_max) {
957                                 dev_err(map->dev,
958                                         "Range %d: selector for %d in window\n",
959                                         i, j);
960                                 goto err_range;
961                         }
962
963                         if (!(win_max < range_cfg->range_min ||
964                               win_min > range_cfg->range_max)) {
965                                 dev_err(map->dev,
966                                         "Range %d: window for %d in window\n",
967                                         i, j);
968                                 goto err_range;
969                         }
970                 }
971
972                 new = kzalloc(sizeof(*new), GFP_KERNEL);
973                 if (new == NULL) {
974                         ret = -ENOMEM;
975                         goto err_range;
976                 }
977
978                 new->map = map;
979                 new->name = range_cfg->name;
980                 new->range_min = range_cfg->range_min;
981                 new->range_max = range_cfg->range_max;
982                 new->selector_reg = range_cfg->selector_reg;
983                 new->selector_mask = range_cfg->selector_mask;
984                 new->selector_shift = range_cfg->selector_shift;
985                 new->window_start = range_cfg->window_start;
986                 new->window_len = range_cfg->window_len;
987
988                 if (!_regmap_range_add(map, new)) {
989                         dev_err(map->dev, "Failed to add range %d\n", i);
990                         kfree(new);
991                         goto err_range;
992                 }
993
994                 if (map->selector_work_buf == NULL) {
995                         map->selector_work_buf =
996                                 kzalloc(map->format.buf_size, GFP_KERNEL);
997                         if (map->selector_work_buf == NULL) {
998                                 ret = -ENOMEM;
999                                 goto err_range;
1000                         }
1001                 }
1002         }
1003
1004         ret = regcache_init(map, config);
1005         if (ret != 0)
1006                 goto err_range;
1007
1008         if (dev) {
1009                 ret = regmap_attach_dev(dev, map, config);
1010                 if (ret != 0)
1011                         goto err_regcache;
1012         }
1013
1014         return map;
1015
1016 err_regcache:
1017         regcache_exit(map);
1018 err_range:
1019         regmap_range_exit(map);
1020         kfree(map->work_buf);
1021 err_map:
1022         kfree(map);
1023 err:
1024         return ERR_PTR(ret);
1025 }
1026 EXPORT_SYMBOL_GPL(__regmap_init);
1027
/*
 * devres release callback: tear down the regmap stored in the devres
 * blob when the owning device is unbound.
 */
static void devm_regmap_release(struct device *dev, void *res)
{
	struct regmap **map = res;

	regmap_exit(*map);
}
1032
1033 struct regmap *__devm_regmap_init(struct device *dev,
1034                                   const struct regmap_bus *bus,
1035                                   void *bus_context,
1036                                   const struct regmap_config *config,
1037                                   struct lock_class_key *lock_key,
1038                                   const char *lock_name)
1039 {
1040         struct regmap **ptr, *regmap;
1041
1042         ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1043         if (!ptr)
1044                 return ERR_PTR(-ENOMEM);
1045
1046         regmap = __regmap_init(dev, bus, bus_context, config,
1047                                lock_key, lock_name);
1048         if (!IS_ERR(regmap)) {
1049                 *ptr = regmap;
1050                 devres_add(dev, ptr);
1051         } else {
1052                 devres_free(ptr);
1053         }
1054
1055         return regmap;
1056 }
1057 EXPORT_SYMBOL_GPL(__devm_regmap_init);
1058
/* Populate @rm_field from the static description in @reg_field. */
static void regmap_field_init(struct regmap_field *rm_field,
	struct regmap *regmap, struct reg_field reg_field)
{
	rm_field->regmap = regmap;
	rm_field->reg = reg_field.reg;
	rm_field->shift = reg_field.lsb;
	/* Mask covers bits msb..lsb inclusive within the register */
	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
	rm_field->id_size = reg_field.id_size;
	rm_field->id_offset = reg_field.id_offset;
}
1069
1070 /**
1071  * devm_regmap_field_alloc(): Allocate and initialise a register field
1072  * in a register map.
1073  *
1074  * @dev: Device that will be interacted with
1075  * @regmap: regmap bank in which this register field is located.
1076  * @reg_field: Register field with in the bank.
1077  *
1078  * The return value will be an ERR_PTR() on error or a valid pointer
1079  * to a struct regmap_field. The regmap_field will be automatically freed
1080  * by the device management code.
1081  */
1082 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1083                 struct regmap *regmap, struct reg_field reg_field)
1084 {
1085         struct regmap_field *rm_field = devm_kzalloc(dev,
1086                                         sizeof(*rm_field), GFP_KERNEL);
1087         if (!rm_field)
1088                 return ERR_PTR(-ENOMEM);
1089
1090         regmap_field_init(rm_field, regmap, reg_field);
1091
1092         return rm_field;
1093
1094 }
1095 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1096
1097 /**
1098  * devm_regmap_field_free(): Free register field allocated using
1099  * devm_regmap_field_alloc. Usally drivers need not call this function,
1100  * as the memory allocated via devm will be freed as per device-driver
1101  * life-cyle.
1102  *
1103  * @dev: Device that will be interacted with
1104  * @field: regmap field which should be freed.
1105  */
1106 void devm_regmap_field_free(struct device *dev,
1107         struct regmap_field *field)
1108 {
1109         devm_kfree(dev, field);
1110 }
1111 EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1112
1113 /**
1114  * regmap_field_alloc(): Allocate and initialise a register field
1115  * in a register map.
1116  *
1117  * @regmap: regmap bank in which this register field is located.
1118  * @reg_field: Register field with in the bank.
1119  *
1120  * The return value will be an ERR_PTR() on error or a valid pointer
1121  * to a struct regmap_field. The regmap_field should be freed by the
1122  * user once its finished working with it using regmap_field_free().
1123  */
1124 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1125                 struct reg_field reg_field)
1126 {
1127         struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1128
1129         if (!rm_field)
1130                 return ERR_PTR(-ENOMEM);
1131
1132         regmap_field_init(rm_field, regmap, reg_field);
1133
1134         return rm_field;
1135 }
1136 EXPORT_SYMBOL_GPL(regmap_field_alloc);
1137
1138 /**
1139  * regmap_field_free(): Free register field allocated using regmap_field_alloc
1140  *
1141  * @field: regmap field which should be freed.
1142  */
1143 void regmap_field_free(struct regmap_field *field)
1144 {
1145         kfree(field);
1146 }
1147 EXPORT_SYMBOL_GPL(regmap_field_free);
1148
1149 /**
1150  * regmap_reinit_cache(): Reinitialise the current register cache
1151  *
1152  * @map: Register map to operate on.
1153  * @config: New configuration.  Only the cache data will be used.
1154  *
1155  * Discard any existing register cache for the map and initialize a
1156  * new cache.  This can be used to restore the cache to defaults or to
1157  * update the cache configuration to reflect runtime discovery of the
1158  * hardware.
1159  *
1160  * No explicit locking is done here, the user needs to ensure that
1161  * this function will not race with other calls to regmap.
1162  */
1163 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1164 {
1165         regcache_exit(map);
1166         regmap_debugfs_exit(map);
1167
1168         map->max_register = config->max_register;
1169         map->writeable_reg = config->writeable_reg;
1170         map->readable_reg = config->readable_reg;
1171         map->volatile_reg = config->volatile_reg;
1172         map->precious_reg = config->precious_reg;
1173         map->cache_type = config->cache_type;
1174
1175         regmap_debugfs_init(map, config->name);
1176
1177         map->cache_bypass = false;
1178         map->cache_only = false;
1179
1180         return regcache_init(map, config);
1181 }
1182 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1183
1184 /**
1185  * regmap_exit(): Free a previously allocated register map
1186  */
1187 void regmap_exit(struct regmap *map)
1188 {
1189         struct regmap_async *async;
1190
1191         regcache_exit(map);
1192         regmap_debugfs_exit(map);
1193         regmap_range_exit(map);
1194         if (map->bus && map->bus->free_context)
1195                 map->bus->free_context(map->bus_context);
1196         kfree(map->work_buf);
1197         while (!list_empty(&map->async_free)) {
1198                 async = list_first_entry_or_null(&map->async_free,
1199                                                  struct regmap_async,
1200                                                  list);
1201                 list_del(&async->list);
1202                 kfree(async->work_buf);
1203                 kfree(async);
1204         }
1205         kfree(map);
1206 }
1207 EXPORT_SYMBOL_GPL(regmap_exit);
1208
1209 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1210 {
1211         struct regmap **r = res;
1212         if (!r || !*r) {
1213                 WARN_ON(!r || !*r);
1214                 return 0;
1215         }
1216
1217         /* If the user didn't specify a name match any */
1218         if (data)
1219                 return (*r)->name == data;
1220         else
1221                 return 1;
1222 }
1223
1224 /**
1225  * dev_get_regmap(): Obtain the regmap (if any) for a device
1226  *
1227  * @dev: Device to retrieve the map for
1228  * @name: Optional name for the register map, usually NULL.
1229  *
1230  * Returns the regmap for the device if one is present, or NULL.  If
1231  * name is specified then it must match the name specified when
1232  * registering the device, if it is NULL then the first regmap found
1233  * will be used.  Devices with multiple register maps are very rare,
1234  * generic code should normally not need to specify a name.
1235  */
1236 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1237 {
1238         struct regmap **r = devres_find(dev, dev_get_regmap_release,
1239                                         dev_get_regmap_match, (void *)name);
1240
1241         if (!r)
1242                 return NULL;
1243         return *r;
1244 }
1245 EXPORT_SYMBOL_GPL(dev_get_regmap);
1246
1247 /**
1248  * regmap_get_device(): Obtain the device from a regmap
1249  *
1250  * @map: Register map to operate on.
1251  *
1252  * Returns the underlying device that the regmap has been created for.
1253  */
1254 struct device *regmap_get_device(struct regmap *map)
1255 {
1256         return map->dev;
1257 }
1258 EXPORT_SYMBOL_GPL(regmap_get_device);
1259
/*
 * Translate a virtual register address inside an indirect (paged)
 * range into its physical data-window address, programming the
 * selector register with the right page first if needed.
 *
 * @reg is updated in place to the window address on success.
 * @val_num > 1 means a bulk access; it must fit inside both the range
 * and a single page, otherwise -EINVAL is returned.
 */
static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       struct regmap_range_node *range,
			       unsigned int val_num)
{
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	/* Position of *reg within its page, and which page it is on */
	win_offset = (*reg - range->range_min) % range->window_len;
	win_page = (*reg - range->range_min) / range->window_len;

	if (val_num > 1) {
		/* Bulk write shouldn't cross range boundary */
		if (*reg + val_num - 1 > range->range_max)
			return -EINVAL;

		/* ... or single page boundary */
		if (val_num > range->window_len - win_offset)
			return -EINVAL;
	}

	/* It is possible to have selector register inside data window.
	   In that case, selector register is located on every page and
	   it needs no page switching, when accessed alone. */
	if (val_num > 1 ||
	    range->window_start + win_offset != range->selector_reg) {
		/* Use separate work_buf during page switching */
		orig_work_buf = map->work_buf;
		map->work_buf = map->selector_work_buf;

		ret = _regmap_update_bits(map, range->selector_reg,
					  range->selector_mask,
					  win_page << range->selector_shift,
					  &page_chg, false);

		/* Restore the caller's (possibly half-built) work buffer */
		map->work_buf = orig_work_buf;

		if (ret != 0)
			return ret;
	}

	*reg = range->window_start + win_offset;

	return 0;
}
1307
1308 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1309                                           unsigned long mask)
1310 {
1311         u8 *buf;
1312         int i;
1313
1314         if (!mask || !map->work_buf)
1315                 return;
1316
1317         buf = map->work_buf;
1318
1319         for (i = 0; i < max_bytes; i++)
1320                 buf[i] |= (mask >> (8 * i)) & 0xff;
1321 }
1322
/*
 * Core raw write path: transmit a pre-formatted block of register
 * values starting at @reg.  Handles writability checks, cache
 * population, splitting writes that span paged-range windows, async
 * bus submission, gather writes, and a linearising fallback when the
 * bus can do none of the above.  Caller must hold the map's lock.
 */
int _regmap_raw_write(struct regmap *map, unsigned int reg,
		      const void *val, size_t val_len)
{
	struct regmap_range_node *range;
	unsigned long flags;
	/* Value portion of work_buf sits after the register and padding */
	void *work_val = map->work_buf + map->format.reg_bytes +
		map->format.pad_bytes;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	WARN_ON(!map->bus);

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
					       reg + regmap_get_offset(map, i)))
				return -EINVAL;

	/* Populate the cache with each value before touching the bus */
	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			ival = map->format.parse_val(val + (i * val_bytes));
			ret = regcache_write(map,
					     reg + regmap_get_offset(map, i),
					     ival);
			if (ret) {
				/* NOTE(review): message reports reg + i but the
				 * cache write used regmap_get_offset(map, i);
				 * these differ when the stride isn't 1 — confirm
				 * which address is intended here. */
				dev_err(map->dev,
					"Error in caching of register: %x ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	range = _regmap_range_lookup(map, reg);
	if (range) {
		int val_num = val_len / map->format.val_bytes;
		int win_offset = (reg - range->range_min) % range->window_len;
		int win_residue = range->window_len - win_offset;

		/* If the write goes beyond the end of the window split it */
		while (val_num > win_residue) {
			dev_dbg(map->dev, "Writing window %d/%zu\n",
				win_residue, val_len / map->format.val_bytes);
			ret = _regmap_raw_write(map, reg, val, win_residue *
						map->format.val_bytes);
			if (ret != 0)
				return ret;

			/* Advance past the chunk just written */
			reg += win_residue;
			val_num -= win_residue;
			val += win_residue * map->format.val_bytes;
			val_len -= win_residue * map->format.val_bytes;

			win_offset = (reg - range->range_min) %
				range->window_len;
			win_residue = range->window_len - win_offset;
		}

		/* Remap reg into the data window, switching pages if needed */
		ret = _regmap_select_page(map, &reg, range, val_num);
		if (ret != 0)
			return ret;
	}

	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->write_flag_mask);

	/*
	 * Essentially all I/O mechanisms will be faster with a single
	 * buffer to write.  Since register syncs often generate raw
	 * writes of single registers optimise that case.
	 */
	if (val != work_val && val_len == map->format.val_bytes) {
		memcpy(work_val, val, map->format.val_bytes);
		val = work_val;
	}

	/* Asynchronous path: queue the transfer and return immediately */
	if (map->async && map->bus->async_write) {
		struct regmap_async *async;

		trace_regmap_async_write_start(map, reg, val_len);

		/* Reuse a previously-allocated async descriptor if any */
		spin_lock_irqsave(&map->async_lock, flags);
		async = list_first_entry_or_null(&map->async_free,
						 struct regmap_async,
						 list);
		if (async)
			list_del(&async->list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		if (!async) {
			async = map->bus->async_alloc();
			if (!async)
				return -ENOMEM;

			async->work_buf = kzalloc(map->format.buf_size,
						  GFP_KERNEL | GFP_DMA);
			if (!async->work_buf) {
				kfree(async);
				return -ENOMEM;
			}
		}

		async->map = map;

		/* If the caller supplied the value we can use it safely. */
		memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
		       map->format.reg_bytes + map->format.val_bytes);

		spin_lock_irqsave(&map->async_lock, flags);
		list_add_tail(&async->list, &map->async_list);
		spin_unlock_irqrestore(&map->async_lock, flags);

		/* Gather from caller's buffer, or send work_buf whole */
		if (val != work_val)
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes,
						    val, val_len, async);
		else
			ret = map->bus->async_write(map->bus_context,
						    async->work_buf,
						    map->format.reg_bytes +
						    map->format.pad_bytes +
						    val_len, NULL, 0, async);

		if (ret != 0) {
			dev_err(map->dev, "Failed to schedule write: %d\n",
				ret);

			/* Return the descriptor to the free pool */
			spin_lock_irqsave(&map->async_lock, flags);
			list_move(&async->list, &map->async_free);
			spin_unlock_irqrestore(&map->async_lock, flags);
		}

		return ret;
	}

	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == work_val)
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	}

	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
1506
1507 /**
1508  * regmap_can_raw_write - Test if regmap_raw_write() is supported
1509  *
1510  * @map: Map to check.
1511  */
1512 bool regmap_can_raw_write(struct regmap *map)
1513 {
1514         return map->bus && map->bus->write && map->format.format_val &&
1515                 map->format.format_reg;
1516 }
1517 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1518
1519 /**
1520  * regmap_get_raw_read_max - Get the maximum size we can read
1521  *
1522  * @map: Map to check.
1523  */
1524 size_t regmap_get_raw_read_max(struct regmap *map)
1525 {
1526         return map->max_raw_read;
1527 }
1528 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1529
1530 /**
1531  * regmap_get_raw_write_max - Get the maximum size we can read
1532  *
1533  * @map: Map to check.
1534  */
1535 size_t regmap_get_raw_write_max(struct regmap *map)
1536 {
1537         return map->max_raw_write;
1538 }
1539 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1540
/*
 * reg_write backend for busses whose format_write() builds a complete
 * register+value message directly into the work buffer.  @context is
 * the struct regmap itself (see _regmap_map_get_context()).
 */
static int _regmap_bus_formatted_write(void *context, unsigned int reg,
				       unsigned int val)
{
	int ret;
	struct regmap_range_node *range;
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_write);

	/* Resolve paged ranges into the physical window address first */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range, 1);
		if (ret != 0)
			return ret;
	}

	map->format.format_write(map, reg, val);

	trace_regmap_hw_write_start(map, reg, 1);

	ret = map->bus->write(map->bus_context, map->work_buf,
			      map->format.buf_size);

	trace_regmap_hw_write_done(map, reg, 1);

	return ret;
}
1568
/* reg_write backend for busses that provide a native reg_write op. */
static int _regmap_bus_reg_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	return map->bus->reg_write(map->bus_context, reg, val);
}
1576
/*
 * reg_write backend for raw busses: format the value into the work
 * buffer (after the register/padding area) and hand it to the raw
 * write path.
 */
static int _regmap_bus_raw_write(void *context, unsigned int reg,
				 unsigned int val)
{
	struct regmap *map = context;

	WARN_ON(!map->bus || !map->format.format_val);

	map->format.format_val(map->work_buf + map->format.reg_bytes
			       + map->format.pad_bytes, val, 0);
	return _regmap_raw_write(map, reg,
				 map->work_buf +
				 map->format.reg_bytes +
				 map->format.pad_bytes,
				 map->format.val_bytes);
}
1592
1593 static inline void *_regmap_map_get_context(struct regmap *map)
1594 {
1595         return (map->bus) ? map : map->bus_context;
1596 }
1597
/*
 * Write a single register value, going through the cache unless
 * bypassed.  Caller must hold the map's lock.  Returns 0 or a
 * negative errno.
 */
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		/* In cache-only mode the hardware is never touched */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}
1626
1627 /**
1628  * regmap_write(): Write a value to a single register
1629  *
1630  * @map: Register map to write to
1631  * @reg: Register to write to
1632  * @val: Value to be written
1633  *
1634  * A value of zero will be returned on success, a negative errno will
1635  * be returned in error cases.
1636  */
1637 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1638 {
1639         int ret;
1640
1641         if (!IS_ALIGNED(reg, map->reg_stride))
1642                 return -EINVAL;
1643
1644         map->lock(map->lock_arg);
1645
1646         ret = _regmap_write(map, reg, val);
1647
1648         map->unlock(map->lock_arg);
1649
1650         return ret;
1651 }
1652 EXPORT_SYMBOL_GPL(regmap_write);
1653
1654 /**
1655  * regmap_write_async(): Write a value to a single register asynchronously
1656  *
1657  * @map: Register map to write to
1658  * @reg: Register to write to
1659  * @val: Value to be written
1660  *
1661  * A value of zero will be returned on success, a negative errno will
1662  * be returned in error cases.
1663  */
1664 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1665 {
1666         int ret;
1667
1668         if (!IS_ALIGNED(reg, map->reg_stride))
1669                 return -EINVAL;
1670
1671         map->lock(map->lock_arg);
1672
1673         map->async = true;
1674
1675         ret = _regmap_write(map, reg, val);
1676
1677         map->async = false;
1678
1679         map->unlock(map->lock_arg);
1680
1681         return ret;
1682 }
1683 EXPORT_SYMBOL_GPL(regmap_write_async);
1684
1685 /**
1686  * regmap_raw_write(): Write raw values to one or more registers
1687  *
1688  * @map: Register map to write to
1689  * @reg: Initial register to write to
1690  * @val: Block of data to be written, laid out for direct transmission to the
1691  *       device
1692  * @val_len: Length of data pointed to by val.
1693  *
1694  * This function is intended to be used for things like firmware
1695  * download where a large block of data needs to be transferred to the
1696  * device.  No formatting will be done on the data provided.
1697  *
1698  * A value of zero will be returned on success, a negative errno will
1699  * be returned in error cases.
1700  */
1701 int regmap_raw_write(struct regmap *map, unsigned int reg,
1702                      const void *val, size_t val_len)
1703 {
1704         int ret;
1705
1706         if (!regmap_can_raw_write(map))
1707                 return -EINVAL;
1708         if (val_len % map->format.val_bytes)
1709                 return -EINVAL;
1710         if (map->max_raw_write && map->max_raw_write > val_len)
1711                 return -E2BIG;
1712
1713         map->lock(map->lock_arg);
1714
1715         ret = _regmap_raw_write(map, reg, val, val_len);
1716
1717         map->unlock(map->lock_arg);
1718
1719         return ret;
1720 }
1721 EXPORT_SYMBOL_GPL(regmap_raw_write);
1722
1723 /**
1724  * regmap_field_update_bits_base():
1725  *      Perform a read/modify/write cycle on the register field
1726  *      with change, async, force option
1727  *
1728  * @field: Register field to write to
1729  * @mask: Bitmask to change
1730  * @val: Value to be written
1731  * @change: Boolean indicating if a write was done
1732  * @async: Boolean indicating asynchronously
1733  * @force: Boolean indicating use force update
1734  *
1735  * A value of zero will be returned on success, a negative errno will
1736  * be returned in error cases.
1737  */
1738 int regmap_field_update_bits_base(struct regmap_field *field,
1739                                   unsigned int mask, unsigned int val,
1740                                   bool *change, bool async, bool force)
1741 {
1742         mask = (mask << field->shift) & field->mask;
1743
1744         return regmap_update_bits_base(field->regmap, field->reg,
1745                                        mask, val << field->shift,
1746                                        change, async, force);
1747 }
1748 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
1749
1750 /**
1751  * regmap_fields_update_bits_base():
1752  *      Perform a read/modify/write cycle on the register field
1753  *      with change, async, force option
1754  *
1755  * @field: Register field to write to
1756  * @id: port ID
1757  * @mask: Bitmask to change
1758  * @val: Value to be written
1759  * @change: Boolean indicating if a write was done
1760  * @async: Boolean indicating asynchronously
1761  * @force: Boolean indicating use force update
1762  *
1763  * A value of zero will be returned on success, a negative errno will
1764  * be returned in error cases.
1765  */
1766 int regmap_fields_update_bits_base(struct regmap_field *field,  unsigned int id,
1767                                    unsigned int mask, unsigned int val,
1768                                    bool *change, bool async, bool force)
1769 {
1770         if (id >= field->id_size)
1771                 return -EINVAL;
1772
1773         mask = (mask << field->shift) & field->mask;
1774
1775         return regmap_update_bits_base(field->regmap,
1776                                        field->reg + (field->id_offset * id),
1777                                        mask, val << field->shift,
1778                                        change, async, force);
1779 }
1780 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
1781
/*
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written from
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		     size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for
	 * them we have a series of single write operations in the first two if
	 * blocks.
	 *
	 * The first if block is used for memory mapped io. It does not allow
	 * val_bytes of 3 for example.
	 * The second one is for busses that do not provide raw I/O.
	 * The third one is used for busses which do not have these limitations
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		/* No bus: registers go one at a time through map->reg_write. */
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			/* Pull one native-sized value out of the caller's buffer. */
			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				/* e.g. 3-byte values cannot be read directly here. */
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map,
					    reg + regmap_get_offset(map, i),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->bus && !map->format.parse_inplace) {
		/*
		 * Bus present but no in-place formatter: fall back to
		 * locked single-register writes via regmap_write().
		 * NOTE(review): the map->bus test here is redundant — the
		 * first branch already handled !map->bus.
		 */
		const u8 *u8 = val;
		const u16 *u16 = val;
		const u32 *u32 = val;
		unsigned int ival;

		for (i = 0; i < val_count; i++) {
			switch (map->format.val_bytes) {
			case 4:
				ival = u32[i];
				break;
			case 2:
				ival = u16[i];
				break;
			case 1:
				ival = u8[i];
				break;
			default:
				return -EINVAL;
			}

			ret = regmap_write(map, reg + (i * map->reg_stride),
					   ival);
			if (ret)
				return ret;
		}
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		/*
		 * Raw I/O available but transfers must be split: either the
		 * device only takes single-register writes, or the whole
		 * block exceeds the bus's max_raw_write limit.
		 */
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			/* Round the chunk down to a whole number of values. */
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		/* Unrestricted raw path: format a copy in place and send it. */
		void *wval;

		if (!val_count)
			return -EINVAL;

		/* Copy so parse_inplace doesn't mutate the caller's buffer. */
		wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
1931
/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 *
 * Linearises the pairs into a single wire buffer laid out as
 * [reg][pad][val][reg][pad][val]... and pushes it with one bus write.
 * Returns 0 on success or a negative errno.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	/* Nothing to send (num_regs == 0 or zero-sized format). */
	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	/* Format each pair in place, advancing the cursor as we go. */
	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	/* The write flag mask is applied only to the first byte on the wire. */
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	/* Trace completion for every register covered by the transfer. */
	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}
1986
1987 static unsigned int _regmap_register_page(struct regmap *map,
1988                                           unsigned int reg,
1989                                           struct regmap_range_node *range)
1990 {
1991         unsigned int win_page = (reg - range->range_min) / range->window_len;
1992
1993         return win_page;
1994 }
1995
/*
 * Write a sequence of registers that may span multiple window pages
 * and/or require inter-write delays, preserving the caller's ordering.
 * Returns 0 on success or a negative errno.
 */
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;	/* n counts regs accumulated since the last flush */
	struct reg_sequence *base;	/* start of the current run */
	unsigned int this_page = 0;
	unsigned int page_change = 0;
	/*
	 * the set of registers are not neccessarily in order, but
	 * since the order of write must be preserved this algorithm
	 * chops the set each time the page changes. This also applies
	 * if there is a delay required at any point in the sequence.
	 */
	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			/* First register establishes the starting page. */
			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

				/* For situations where the first write requires
				 * a delay we need to make sure we don't call
				 * raw_multi_reg_write with n=0
				 * This can't occur with page breaks as we
				 * never write on the first iteration
				 */
				if (regs[i].delay_us && i == 0)
					n = 1;

				/* Flush the accumulated run of same-page regs. */
				ret = _regmap_raw_multi_reg_write(map, base, n);
				if (ret != 0)
					return ret;

				if (regs[i].delay_us)
					udelay(regs[i].delay_us);

				/* Start the next run at the current register. */
				base += n;
				n = 0;

				if (page_change) {
					/* Select the new page before continuing. */
					ret = _regmap_select_page(map,
								  &base[n].reg,
								  range, 1);
					if (ret != 0)
						return ret;

					page_change = 0;
				}

		}

	}
	/* Flush whatever is left after the final page/delay break. */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
2072
/*
 * Core of the multi-register write path: falls back to single writes
 * when the bus can't batch, updates the cache, and dispatches to the
 * paged writer when any register lies in a range or needs a delay.
 * Returns 0 on success or a negative errno.
 */
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	/* No batching support: issue one write (plus delay) per entry. */
	if (!map->can_multi_write) {
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	/* Validate writability and stride alignment up front. */
	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (!IS_ALIGNED(reg, map->reg_stride))
				return -EINVAL;
		}

	/* Populate the cache first; in cache-only mode that's all we do. */
	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
				"Error in caching of register: %x ret: %d\n",
								reg, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	WARN_ON(!map->bus);

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			/* Copy since the paged writer rewrites reg fields. */
			size_t len = sizeof(struct reg_sequence)*num_regs;
			struct reg_sequence *base = kmemdup(regs, len,
							   GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	/* No ranges or delays involved: one flat batched transfer. */
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2147
2148 /*
2149  * regmap_multi_reg_write(): Write multiple registers to the device
2150  *
2151  * where the set of register,value pairs are supplied in any order,
2152  * possibly not all in a single range.
2153  *
2154  * @map: Register map to write to
2155  * @regs: Array of structures containing register,value to be written
2156  * @num_regs: Number of registers to write
2157  *
2158  * The 'normal' block write mode will send ultimately send data on the
2159  * target bus as R,V1,V2,V3,..,Vn where successively higer registers are
2160  * addressed. However, this alternative block multi write mode will send
2161  * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2162  * must of course support the mode.
2163  *
2164  * A value of zero will be returned on success, a negative errno will be
2165  * returned in error cases.
2166  */
2167 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2168                            int num_regs)
2169 {
2170         int ret;
2171
2172         map->lock(map->lock_arg);
2173
2174         ret = _regmap_multi_reg_write(map, regs, num_regs);
2175
2176         map->unlock(map->lock_arg);
2177
2178         return ret;
2179 }
2180 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2181
2182 /*
2183  * regmap_multi_reg_write_bypassed(): Write multiple registers to the
2184  *                                    device but not the cache
2185  *
2186  * where the set of register are supplied in any order
2187  *
2188  * @map: Register map to write to
2189  * @regs: Array of structures containing register,value to be written
2190  * @num_regs: Number of registers to write
2191  *
2192  * This function is intended to be used for writing a large block of data
2193  * atomically to the device in single transfer for those I2C client devices
2194  * that implement this alternative block write mode.
2195  *
2196  * A value of zero will be returned on success, a negative errno will
2197  * be returned in error cases.
2198  */
2199 int regmap_multi_reg_write_bypassed(struct regmap *map,
2200                                     const struct reg_sequence *regs,
2201                                     int num_regs)
2202 {
2203         int ret;
2204         bool bypass;
2205
2206         map->lock(map->lock_arg);
2207
2208         bypass = map->cache_bypass;
2209         map->cache_bypass = true;
2210
2211         ret = _regmap_multi_reg_write(map, regs, num_regs);
2212
2213         map->cache_bypass = bypass;
2214
2215         map->unlock(map->lock_arg);
2216
2217         return ret;
2218 }
2219 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2220
2221 /**
2222  * regmap_raw_write_async(): Write raw values to one or more registers
2223  *                           asynchronously
2224  *
2225  * @map: Register map to write to
2226  * @reg: Initial register to write to
2227  * @val: Block of data to be written, laid out for direct transmission to the
2228  *       device.  Must be valid until regmap_async_complete() is called.
2229  * @val_len: Length of data pointed to by val.
2230  *
2231  * This function is intended to be used for things like firmware
2232  * download where a large block of data needs to be transferred to the
2233  * device.  No formatting will be done on the data provided.
2234  *
2235  * If supported by the underlying bus the write will be scheduled
2236  * asynchronously, helping maximise I/O speed on higher speed buses
2237  * like SPI.  regmap_async_complete() can be called to ensure that all
2238  * asynchrnous writes have been completed.
2239  *
2240  * A value of zero will be returned on success, a negative errno will
2241  * be returned in error cases.
2242  */
2243 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2244                            const void *val, size_t val_len)
2245 {
2246         int ret;
2247
2248         if (val_len % map->format.val_bytes)
2249                 return -EINVAL;
2250         if (!IS_ALIGNED(reg, map->reg_stride))
2251                 return -EINVAL;
2252
2253         map->lock(map->lock_arg);
2254
2255         map->async = true;
2256
2257         ret = _regmap_raw_write(map, reg, val, val_len);
2258
2259         map->async = false;
2260
2261         map->unlock(map->lock_arg);
2262
2263         return ret;
2264 }
2265 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2266
/*
 * Issue a raw read on the bus: formats the register address (plus read
 * flag mask) into work_buf and reads val_len bytes into val.  Handles
 * page selection for registers that fall inside a mapped range.
 * Returns 0 on success or a negative errno.
 */
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	struct regmap_range_node *range;
	int ret;

	WARN_ON(!map->bus);

	/* Requires a bus with a raw read operation. */
	if (!map->bus || !map->bus->read)
		return -EINVAL;

	/* Switch to the right window page if the register is paged. */
	range = _regmap_range_lookup(map, reg);
	if (range) {
		ret = _regmap_select_page(map, &reg, range,
					  val_len / map->format.val_bytes);
		if (ret != 0)
			return ret;
	}

	/* Build the address bytes, then OR in the read flag mask. */
	map->format.format_reg(map->work_buf, reg, map->reg_shift);
	regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
				      map->read_flag_mask);
	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);

	return ret;
}
2299
/* reg_read callback for buses that implement register-level reads. */
static int _regmap_bus_reg_read(void *context, unsigned int reg,
				unsigned int *val)
{
	struct regmap *map = context;

	/* Delegate straight to the bus-level register read. */
	return map->bus->reg_read(map->bus_context, reg, val);
}
2307
2308 static int _regmap_bus_read(void *context, unsigned int reg,
2309                             unsigned int *val)
2310 {
2311         int ret;
2312         struct regmap *map = context;
2313
2314         if (!map->format.parse_val)
2315                 return -EINVAL;
2316
2317         ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
2318         if (ret == 0)
2319                 *val = map->format.parse_val(map->work_buf);
2320
2321         return ret;
2322 }
2323
/*
 * Core single-register read: tries the cache first, then falls through
 * to the map's reg_read operation, writing a hit back into the cache.
 * Returns 0 on success or a negative errno (-EBUSY in cache-only mode,
 * -EIO for unreadable registers).  Caller must hold the map lock.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	/* Cache hit avoids touching the hardware entirely. */
	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	/* Cache-only maps must not reach the device on a miss. */
	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		/* Populate the cache with the freshly read value. */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2357
2358 /**
2359  * regmap_read(): Read a value from a single register
2360  *
2361  * @map: Register map to read from
2362  * @reg: Register to be read from
2363  * @val: Pointer to store read value
2364  *
2365  * A value of zero will be returned on success, a negative errno will
2366  * be returned in error cases.
2367  */
2368 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2369 {
2370         int ret;
2371
2372         if (!IS_ALIGNED(reg, map->reg_stride))
2373                 return -EINVAL;
2374
2375         map->lock(map->lock_arg);
2376
2377         ret = _regmap_read(map, reg, val);
2378
2379         map->unlock(map->lock_arg);
2380
2381         return ret;
2382 }
2383 EXPORT_SYMBOL_GPL(regmap_read);
2384
/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * Performs a single physical block read when the range is volatile or
 * uncached; otherwise reads register by register through the cache and
 * formats each value into @val.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	/* Raw data must be a whole number of register-sized values. */
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (!IS_ALIGNED(reg, map->reg_stride))
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		/* Respect the bus's raw transfer size limit. */
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + regmap_get_offset(map, i),
					   &v);
			if (ret != 0)
				goto out;

			/* Re-serialise the cached value into the raw buffer. */
			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
2449
2450 /**
2451  * regmap_field_read(): Read a value to a single register field
2452  *
2453  * @field: Register field to read from
2454  * @val: Pointer to store read value
2455  *
2456  * A value of zero will be returned on success, a negative errno will
2457  * be returned in error cases.
2458  */
2459 int regmap_field_read(struct regmap_field *field, unsigned int *val)
2460 {
2461         int ret;
2462         unsigned int reg_val;
2463         ret = regmap_read(field->regmap, field->reg, &reg_val);
2464         if (ret != 0)
2465                 return ret;
2466
2467         reg_val &= field->mask;
2468         reg_val >>= field->shift;
2469         *val = reg_val;
2470
2471         return ret;
2472 }
2473 EXPORT_SYMBOL_GPL(regmap_field_read);
2474
2475 /**
2476  * regmap_fields_read(): Read a value to a single register field with port ID
2477  *
2478  * @field: Register field to read from
2479  * @id: port ID
2480  * @val: Pointer to store read value
2481  *
2482  * A value of zero will be returned on success, a negative errno will
2483  * be returned in error cases.
2484  */
2485 int regmap_fields_read(struct regmap_field *field, unsigned int id,
2486                        unsigned int *val)
2487 {
2488         int ret;
2489         unsigned int reg_val;
2490
2491         if (id >= field->id_size)
2492                 return -EINVAL;
2493
2494         ret = regmap_read(field->regmap,
2495                           field->reg + (field->id_offset * id),
2496                           &reg_val);
2497         if (ret != 0)
2498                 return ret;
2499
2500         reg_val &= field->mask;
2501         reg_val >>= field->shift;
2502         *val = reg_val;
2503
2504         return ret;
2505 }
2506 EXPORT_SYMBOL_GPL(regmap_fields_read);
2507
2508 /**
2509  * regmap_bulk_read(): Read multiple registers from the device
2510  *
2511  * @map: Register map to read from
2512  * @reg: First register to be read from
2513  * @val: Pointer to store read value, in native register size for device
2514  * @val_count: Number of registers to read
2515  *
2516  * A value of zero will be returned on success, a negative errno will
2517  * be returned in error cases.
2518  */
2519 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2520                      size_t val_count)
2521 {
2522         int ret, i;
2523         size_t val_bytes = map->format.val_bytes;
2524         bool vol = regmap_volatile_range(map, reg, val_count);
2525
2526         if (!IS_ALIGNED(reg, map->reg_stride))
2527                 return -EINVAL;
2528
2529         if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2530                 /*
2531                  * Some devices does not support bulk read, for
2532                  * them we have a series of single read operations.
2533                  */
2534                 size_t total_size = val_bytes * val_count;
2535
2536                 if (!map->use_single_read &&
2537                     (!map->max_raw_read || map->max_raw_read > total_size)) {
2538                         ret = regmap_raw_read(map, reg, val,
2539                                               val_bytes * val_count);
2540                         if (ret != 0)
2541                                 return ret;
2542                 } else {
2543                         /*
2544                          * Some devices do not support bulk read or do not
2545                          * support large bulk reads, for them we have a series
2546                          * of read operations.
2547                          */
2548                         int chunk_stride = map->reg_stride;
2549                         size_t chunk_size = val_bytes;
2550                         size_t chunk_count = val_count;
2551
2552                         if (!map->use_single_read) {
2553                                 chunk_size = map->max_raw_read;
2554                                 if (chunk_size % val_bytes)
2555                                         chunk_size -= chunk_size % val_bytes;
2556                                 chunk_count = total_size / chunk_size;
2557                                 chunk_stride *= chunk_size / val_bytes;
2558                         }
2559
2560                         /* Read bytes that fit into a multiple of chunk_size */
2561                         for (i = 0; i < chunk_count; i++) {
2562                                 ret = regmap_raw_read(map,
2563                                                       reg + (i * chunk_stride),
2564                                                       val + (i * chunk_size),
2565                                                       chunk_size);
2566                                 if (ret != 0)
2567                                         return ret;
2568                         }
2569
2570                         /* Read remaining bytes */
2571                         if (chunk_size * i < total_size) {
2572                                 ret = regmap_raw_read(map,
2573                                                       reg + (i * chunk_stride),
2574                                                       val + (i * chunk_size),
2575                                                       total_size - i * chunk_size);
2576                                 if (ret != 0)
2577                                         return ret;
2578                         }
2579                 }
2580
2581                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2582                         map->format.parse_inplace(val + i);
2583         } else {
2584                 for (i = 0; i < val_count; i++) {
2585                         unsigned int ival;
2586                         ret = regmap_read(map, reg + regmap_get_offset(map, i),
2587                                           &ival);
2588                         if (ret != 0)
2589                                 return ret;
2590
2591                         if (map->format.format_val) {
2592                                 map->format.format_val(val + (i * val_bytes), ival, 0);
2593                         } else {
2594                                 /* Devices providing read and write
2595                                  * operations can use the bulk I/O
2596                                  * functions if they define a val_bytes,
2597                                  * we assume that the values are native
2598                                  * endian.
2599                                  */
2600 #ifdef CONFIG_64BIT
2601                                 u64 *u64 = val;
2602 #endif
2603                                 u32 *u32 = val;
2604                                 u16 *u16 = val;
2605                                 u8 *u8 = val;
2606
2607                                 switch (map->format.val_bytes) {
2608 #ifdef CONFIG_64BIT
2609                                 case 8:
2610                                         u64[i] = ival;
2611                                         break;
2612 #endif
2613                                 case 4:
2614                                         u32[i] = ival;
2615                                         break;
2616                                 case 2:
2617                                         u16[i] = ival;
2618                                         break;
2619                                 case 1:
2620                                         u8[i] = ival;
2621                                         break;
2622                                 default:
2623                                         return -EINVAL;
2624                                 }
2625                         }
2626                 }
2627         }
2628
2629         return 0;
2630 }
2631 EXPORT_SYMBOL_GPL(regmap_bulk_read);
2632
2633 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
2634                                unsigned int mask, unsigned int val,
2635                                bool *change, bool force_write)
2636 {
2637         int ret;
2638         unsigned int tmp, orig;
2639
2640         if (change)
2641                 *change = false;
2642
2643         if (regmap_volatile(map, reg) && map->reg_update_bits) {
2644                 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
2645                 if (ret == 0 && change)
2646                         *change = true;
2647         } else {
2648                 ret = _regmap_read(map, reg, &orig);
2649                 if (ret != 0)
2650                         return ret;
2651
2652                 tmp = orig & ~mask;
2653                 tmp |= val & mask;
2654
2655                 if (force_write || (tmp != orig)) {
2656                         ret = _regmap_write(map, reg, tmp);
2657                         if (ret == 0 && change)
2658                                 *change = true;
2659                 }
2660         }
2661
2662         return ret;
2663 }
2664
2665 /**
2666  * regmap_update_bits_base:
2667  *      Perform a read/modify/write cycle on the
2668  *      register map with change, async, force option
2669  *
2670  * @map: Register map to update
2671  * @reg: Register to update
2672  * @mask: Bitmask to change
2673  * @val: New value for bitmask
2674  * @change: Boolean indicating if a write was done
2675  * @async: Boolean indicating asynchronously
2676  * @force: Boolean indicating use force update
2677  *
2678  * if async was true,
2679  * With most buses the read must be done synchronously so this is most
2680  * useful for devices with a cache which do not need to interact with
2681  * the hardware to determine the current register value.
2682  *
2683  * Returns zero for success, a negative number on error.
2684  */
2685 int regmap_update_bits_base(struct regmap *map, unsigned int reg,
2686                             unsigned int mask, unsigned int val,
2687                             bool *change, bool async, bool force)
2688 {
2689         int ret;
2690
2691         map->lock(map->lock_arg);
2692
2693         map->async = async;
2694
2695         ret = _regmap_update_bits(map, reg, mask, val, change, force);
2696
2697         map->async = false;
2698
2699         map->unlock(map->lock_arg);
2700
2701         return ret;
2702 }
2703 EXPORT_SYMBOL_GPL(regmap_update_bits_base);
2704
/**
 * regmap_async_complete_cb() - Completion callback for asynchronous I/O
 *
 * @async: Completed async descriptor
 * @ret: Status of the completed operation, zero on success
 *
 * Intended to be invoked by bus implementations when an asynchronous
 * write finishes.  Returns the descriptor to the free list, records a
 * non-zero status for later retrieval by regmap_async_complete(), and
 * wakes waiters once the last outstanding operation completes.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	/* Decide whether to wake while still holding the lock */
	wake = list_empty(&map->async_list);

	/* A non-zero status overwrites any previously recorded error */
	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
2725
2726 static int regmap_async_is_done(struct regmap *map)
2727 {
2728         unsigned long flags;
2729         int ret;
2730
2731         spin_lock_irqsave(&map->async_lock, flags);
2732         ret = list_empty(&map->async_list);
2733         spin_unlock_irqrestore(&map->async_lock, flags);
2734
2735         return ret;
2736 }
2737
2738 /**
2739  * regmap_async_complete: Ensure all asynchronous I/O has completed.
2740  *
2741  * @map: Map to operate on.
2742  *
2743  * Blocks until any pending asynchronous I/O has completed.  Returns
2744  * an error code for any failed I/O operations.
2745  */
2746 int regmap_async_complete(struct regmap *map)
2747 {
2748         unsigned long flags;
2749         int ret;
2750
2751         /* Nothing to do with no async support */
2752         if (!map->bus || !map->bus->async_write)
2753                 return 0;
2754
2755         trace_regmap_async_complete_start(map);
2756
2757         wait_event(map->async_waitq, regmap_async_is_done(map));
2758
2759         spin_lock_irqsave(&map->async_lock, flags);
2760         ret = map->async_ret;
2761         map->async_ret = 0;
2762         spin_unlock_irqrestore(&map->async_lock, flags);
2763
2764         trace_regmap_async_complete_done(map);
2765
2766         return ret;
2767 }
2768 EXPORT_SYMBOL_GPL(regmap_async_complete);
2769
2770 /**
2771  * regmap_register_patch: Register and apply register updates to be applied
2772  *                        on device initialistion
2773  *
2774  * @map: Register map to apply updates to.
2775  * @regs: Values to update.
2776  * @num_regs: Number of entries in regs.
2777  *
2778  * Register a set of register updates to be applied to the device
2779  * whenever the device registers are synchronised with the cache and
2780  * apply them immediately.  Typically this is used to apply
2781  * corrections to be applied to the device defaults on startup, such
2782  * as the updates some vendors provide to undocumented registers.
2783  *
2784  * The caller must ensure that this function cannot be called
2785  * concurrently with either itself or regcache_sync().
2786  */
2787 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
2788                           int num_regs)
2789 {
2790         struct reg_sequence *p;
2791         int ret;
2792         bool bypass;
2793
2794         if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
2795             num_regs))
2796                 return 0;
2797
2798         p = krealloc(map->patch,
2799                      sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
2800                      GFP_KERNEL);
2801         if (p) {
2802                 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
2803                 map->patch = p;
2804                 map->patch_regs += num_regs;
2805         } else {
2806                 return -ENOMEM;
2807         }
2808
2809         map->lock(map->lock_arg);
2810
2811         bypass = map->cache_bypass;
2812
2813         map->cache_bypass = true;
2814         map->async = true;
2815
2816         ret = _regmap_multi_reg_write(map, regs, num_regs);
2817
2818         map->async = false;
2819         map->cache_bypass = bypass;
2820
2821         map->unlock(map->lock_arg);
2822
2823         regmap_async_complete(map);
2824
2825         return ret;
2826 }
2827 EXPORT_SYMBOL_GPL(regmap_register_patch);
2828
2829 /*
2830  * regmap_get_val_bytes(): Report the size of a register value
2831  *
2832  * Report the size of a register value, mainly intended to for use by
2833  * generic infrastructure built on top of regmap.
2834  */
2835 int regmap_get_val_bytes(struct regmap *map)
2836 {
2837         if (map->format.format_write)
2838                 return -EINVAL;
2839
2840         return map->format.val_bytes;
2841 }
2842 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2843
2844 /**
2845  * regmap_get_max_register(): Report the max register value
2846  *
2847  * Report the max register value, mainly intended to for use by
2848  * generic infrastructure built on top of regmap.
2849  */
2850 int regmap_get_max_register(struct regmap *map)
2851 {
2852         return map->max_register ? map->max_register : -EINVAL;
2853 }
2854 EXPORT_SYMBOL_GPL(regmap_get_max_register);
2855
2856 /**
2857  * regmap_get_reg_stride(): Report the register address stride
2858  *
2859  * Report the register address stride, mainly intended to for use by
2860  * generic infrastructure built on top of regmap.
2861  */
2862 int regmap_get_reg_stride(struct regmap *map)
2863 {
2864         return map->reg_stride;
2865 }
2866 EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
2867
2868 int regmap_parse_val(struct regmap *map, const void *buf,
2869                         unsigned int *val)
2870 {
2871         if (!map->format.parse_val)
2872                 return -EINVAL;
2873
2874         *val = map->format.parse_val(buf);
2875
2876         return 0;
2877 }
2878 EXPORT_SYMBOL_GPL(regmap_parse_val);
2879
/* Early init: set up the regmap debugfs infrastructure. */
static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);