// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Reset Controller framework
 *
 * Copyright 2013 Philipp Zabel, Pengutronix
 */
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

static DEFINE_MUTEX(reset_list_mutex);
static LIST_HEAD(reset_controller_list);

static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);

/**
 * struct reset_control - a reset control
 * @rcdev: a pointer to the reset controller device
 *         this reset control belongs to
 * @list: list entry for the rcdev's reset controller list
 * @id: ID of the reset controller in the reset
 *      controller device
 * @refcnt: Number of gets of this reset_control
 * @acquired: Only one reset_control may be acquired for a given rcdev and id.
 * @shared: Is this a shared (1), or an exclusive (0) reset_control?
 * @array: Is this an array of reset controls (1)?
 * @deassert_count: Number of times this reset line has been deasserted
 * @triggered_count: Number of times this reset line has been reset. Currently
 *                   only used for shared resets, which means that the value
 *                   will be either 0 or 1.
 */
struct reset_control {
	struct reset_controller_dev *rcdev;
	struct list_head list;
	unsigned int id;
	struct kref refcnt;
	bool acquired;
	bool shared;
	bool array;
	atomic_t deassert_count;
	atomic_t triggered_count;
};

/**
 * struct reset_control_array - an array of reset controls
 * @base: reset control for compatibility with reset control API functions
 * @num_rstcs: number of reset controls
 * @rstc: array of reset controls
 */
struct reset_control_array {
	struct reset_control base;
	unsigned int num_rstcs;
	struct reset_control *rstc[];
};
static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
	if (rcdev->dev)
		return dev_name(rcdev->dev);

	if (rcdev->of_node)
		return rcdev->of_node->full_name;

	return NULL;
}

/**
 * of_reset_simple_xlate - translate reset_spec to the reset line number
 * @rcdev: a pointer to the reset controller device
 * @reset_spec: reset line specifier as found in the device tree
 *
 * This static translation function is used by default if of_xlate in
 * :c:type:`reset_controller_dev` is not set. It is useful for all reset
 * controllers with 1:1 mapping, where reset lines can be indexed by number
 * without gaps.
 */
static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	if (reset_spec->args[0] >= rcdev->nr_resets)
		return -EINVAL;

	return reset_spec->args[0];
}
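
/*
 * Example (illustrative sketch, not part of this file): a provider whose
 * "#reset-cells" is 2 (say, a bank and a bit index for a hypothetical "foo"
 * controller) cannot use the 1:1 translation above and supplies its own
 * of_xlate instead:
 *
 *	static int foo_reset_xlate(struct reset_controller_dev *rcdev,
 *				   const struct of_phandle_args *reset_spec)
 *	{
 *		// flatten (bank, bit) into the line number passed to the ops
 *		unsigned int line = reset_spec->args[0] * 32 + reset_spec->args[1];
 *
 *		if (line >= rcdev->nr_resets)
 *			return -EINVAL;
 *
 *		return line;
 *	}
 *
 * together with of_reset_n_cells = 2 and of_xlate = foo_reset_xlate set in
 * the reset_controller_dev before registration.
 */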

/**
 * reset_controller_register - register a reset controller device
 * @rcdev: a pointer to the initialized reset controller device
 */
int reset_controller_register(struct reset_controller_dev *rcdev)
{
	if (!rcdev->of_xlate) {
		rcdev->of_reset_n_cells = 1;
		rcdev->of_xlate = of_reset_simple_xlate;
	}

	INIT_LIST_HEAD(&rcdev->reset_control_head);

	mutex_lock(&reset_list_mutex);
	list_add(&rcdev->list, &reset_controller_list);
	mutex_unlock(&reset_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(reset_controller_register);
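
/*
 * Example (sketch under assumed names, not part of this file): a minimal
 * provider driver for a hypothetical "foo" controller fills in a
 * reset_controller_dev and registers it from its probe function:
 *
 *	struct foo_reset {
 *		struct reset_controller_dev rcdev;
 *		void __iomem *base;
 *	};
 *
 *	static int foo_reset_probe(struct platform_device *pdev)
 *	{
 *		struct foo_reset *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->rcdev.owner = THIS_MODULE;
 *		priv->rcdev.ops = &foo_reset_ops;	// the driver's reset_control_ops
 *		priv->rcdev.of_node = pdev->dev.of_node;
 *		priv->rcdev.nr_resets = 32;
 *
 *		return reset_controller_register(&priv->rcdev);
 *	}
 *
 * A matching reset_controller_unregister() call belongs in the driver's
 * remove path.
 */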

/**
 * reset_controller_unregister - unregister a reset controller device
 * @rcdev: a pointer to the reset controller device
 */
void reset_controller_unregister(struct reset_controller_dev *rcdev)
{
	mutex_lock(&reset_list_mutex);
	list_del(&rcdev->list);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_unregister);

static void devm_reset_controller_release(struct device *dev, void *res)
{
	reset_controller_unregister(*(struct reset_controller_dev **)res);
}

/**
 * devm_reset_controller_register - resource managed reset_controller_register()
 * @dev: device that is registering this reset controller
 * @rcdev: a pointer to the initialized reset controller device
 *
 * Managed reset_controller_register(). For reset controllers registered by
 * this function, reset_controller_unregister() is automatically called on
 * driver detach. See reset_controller_register() for more information.
 */
int devm_reset_controller_register(struct device *dev,
				   struct reset_controller_dev *rcdev)
{
	struct reset_controller_dev **rcdevp;
	int ret;

	rcdevp = devres_alloc(devm_reset_controller_release, sizeof(*rcdevp),
			      GFP_KERNEL);
	if (!rcdevp)
		return -ENOMEM;

	ret = reset_controller_register(rcdev);
	if (ret) {
		devres_free(rcdevp);
		return ret;
	}

	*rcdevp = rcdev;
	devres_add(dev, rcdevp);

	return ret;
}
EXPORT_SYMBOL_GPL(devm_reset_controller_register);
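
/*
 * Example (continuing the hypothetical "foo" probe sketched above): with the
 * managed variant the unregister call in the remove path can be dropped:
 *
 *	return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
 */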

/**
 * reset_controller_add_lookup - register a set of lookup entries
 * @lookup: array of reset lookup entries
 * @num_entries: number of entries in the lookup array
 */
void reset_controller_add_lookup(struct reset_control_lookup *lookup,
				 unsigned int num_entries)
{
	struct reset_control_lookup *entry;
	unsigned int i;

	mutex_lock(&reset_lookup_mutex);
	for (i = 0; i < num_entries; i++) {
		entry = &lookup[i];

		if (!entry->dev_id || !entry->provider) {
			pr_warn("%s(): reset lookup entry badly specified, skipping\n",
				__func__);
			continue;
		}

		list_add_tail(&entry->list, &reset_lookup_list);
	}
	mutex_unlock(&reset_lookup_mutex);
}
EXPORT_SYMBOL_GPL(reset_controller_add_lookup);
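
/*
 * Example (sketch, assuming the RESET_LOOKUP() initializer from
 * <linux/reset-controller.h> and hypothetical device names): board code on a
 * non-DT platform can route line 3 of the provider registered as "foo-reset"
 * to the consumer device "bar-device" under the connection id "bus":
 *
 *	static struct reset_control_lookup foo_reset_lookup[] = {
 *		RESET_LOOKUP("foo-reset", 3, "bar-device", "bus"),
 *	};
 *
 *	reset_controller_add_lookup(foo_reset_lookup,
 *				    ARRAY_SIZE(foo_reset_lookup));
 */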

static inline struct reset_control_array *
rstc_to_array(struct reset_control *rstc)
{
	return container_of(rstc, struct reset_control_array, base);
}

static int reset_control_array_reset(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_reset(resets->rstc[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int reset_control_array_rearm(struct reset_control_array *resets)
{
	struct reset_control *rstc;
	int i;

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (!rstc)
			continue;

		if (WARN_ON(IS_ERR(rstc)))
			return -EINVAL;

		if (rstc->shared) {
			if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
				return -EINVAL;
		} else {
			if (!rstc->acquired)
				return -EPERM;
		}
	}

	for (i = 0; i < resets->num_rstcs; i++) {
		rstc = resets->rstc[i];

		if (rstc && rstc->shared)
			WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	}

	return 0;
}

static int reset_control_array_assert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_assert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_deassert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_deassert(struct reset_control_array *resets)
{
	int ret, i;

	for (i = 0; i < resets->num_rstcs; i++) {
		ret = reset_control_deassert(resets->rstc[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_assert(resets->rstc[i]);
	return ret;
}

static int reset_control_array_acquire(struct reset_control_array *resets)
{
	unsigned int i;
	int err;

	for (i = 0; i < resets->num_rstcs; i++) {
		err = reset_control_acquire(resets->rstc[i]);
		if (err < 0)
			goto release;
	}

	return 0;

release:
	while (i--)
		reset_control_release(resets->rstc[i]);

	return err;
}

static void reset_control_array_release(struct reset_control_array *resets)
{
	unsigned int i;

	for (i = 0; i < resets->num_rstcs; i++)
		reset_control_release(resets->rstc[i]);
}

static inline bool reset_control_is_array(struct reset_control *rstc)
{
	return rstc->array;
}

/**
 * reset_control_reset - reset the controlled device
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance: for all but the first caller this is
 * a no-op.
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_reset(struct reset_control *rstc)
{
	int ret;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_reset(rstc_to_array(rstc));

	if (!rstc->rcdev->ops->reset)
		return -ENOTSUPP;

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->triggered_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	ret = rstc->rcdev->ops->reset(rstc->rcdev, rstc->id);
	if (rstc->shared && ret)
		atomic_dec(&rstc->triggered_count);

	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_reset);
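
/*
 * Example (consumer sketch with hypothetical names): a driver that owns a
 * dedicated, self-deasserting reset line typically pulses it once during
 * probe before touching the hardware:
 *
 *	static int bar_probe(struct platform_device *pdev)
 *	{
 *		struct reset_control *rstc;
 *		int ret;
 *
 *		rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
 *		if (IS_ERR(rstc))
 *			return PTR_ERR(rstc);
 *
 *		ret = reset_control_reset(rstc);
 *		if (ret)
 *			return ret;
 *
 *		// the block is now out of reset and can be programmed
 *		return 0;
 *	}
 */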

/**
 * reset_control_bulk_reset - reset the controlled devices in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Issue a reset on all provided reset controls, in order.
 *
 * See also: reset_control_reset()
 */
int reset_control_bulk_reset(int num_rstcs,
			     struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_reset(rstcs[i].rstc);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_reset);
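
/*
 * Example (sketch, assuming the devm_reset_control_bulk_get_exclusive()
 * wrapper from <linux/reset.h> and hypothetical line names): the bulk API
 * operates on an array of reset_control_bulk_data whose ids the consumer
 * fills in before requesting the controls:
 *
 *	static struct reset_control_bulk_data bar_resets[] = {
 *		{ .id = "core" },
 *		{ .id = "bus" },
 *	};
 *
 *	ret = devm_reset_control_bulk_get_exclusive(&pdev->dev,
 *						    ARRAY_SIZE(bar_resets),
 *						    bar_resets);
 *	if (ret)
 *		return ret;
 *
 *	ret = reset_control_bulk_reset(ARRAY_SIZE(bar_resets), bar_resets);
 */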

/**
 * reset_control_rearm - allow shared reset line to be re-triggered
 * @rstc: reset controller
 *
 * On a shared reset line the actual reset pulse is only triggered once for the
 * lifetime of the reset_control instance, except if this call is used.
 *
 * Calls to this function must be balanced with calls to reset_control_reset;
 * a warning is emitted if triggered_count ever dips below 0.
 *
 * Consumers must not use reset_control_(de)assert on shared reset lines when
 * reset_control_reset or reset_control_rearm have been used.
 *
 * If rstc is NULL the function will just return 0.
 */
int reset_control_rearm(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_rearm(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
			return -EINVAL;

		WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
	} else {
		if (!rstc->acquired)
			return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_rearm);
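
/*
 * Example (sketch with a hypothetical driver-private struct): a consumer
 * that wants the shared line to be pulsed again on every resume balances
 * each trigger with a rearm once the previous pulse is no longer needed:
 *
 *	static int bar_suspend(struct device *dev)
 *	{
 *		struct bar_priv *priv = dev_get_drvdata(dev);
 *
 *		// forget the pulse issued during the last probe/resume
 *		return reset_control_rearm(priv->rstc);
 *	}
 *
 *	static int bar_resume(struct device *dev)
 *	{
 *		struct bar_priv *priv = dev_get_drvdata(dev);
 *
 *		return reset_control_reset(priv->rstc);
 *	}
 */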

/**
 * reset_control_assert - asserts the reset line
 * @rstc: reset controller
 *
 * Calling this on an exclusive reset controller guarantees that the reset
 * will be asserted. When called on a shared reset controller the line may
 * still be deasserted, as long as other users keep it so.
 *
 * For shared reset controls a driver cannot expect the hw's registers and
 * internal state to be reset, but must be prepared for this to happen.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_assert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_assert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (WARN_ON(atomic_read(&rstc->deassert_count) == 0))
			return -EINVAL;

		if (atomic_dec_return(&rstc->deassert_count) != 0)
			return 0;

		/*
		 * Shared reset controls allow the reset line to be in any state
		 * after this call, so doing nothing is a valid option.
		 */
		if (!rstc->rcdev->ops->assert)
			return 0;
	} else {
		/*
		 * If the reset controller does not implement .assert(), there
		 * is no way to guarantee that the reset line is asserted after
		 * this call.
		 */
		if (!rstc->rcdev->ops->assert)
			return -ENOTSUPP;

		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	return rstc->rcdev->ops->assert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_assert);

/**
 * reset_control_bulk_assert - asserts the reset lines in order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Assert the reset lines for all provided reset controls, in order.
 * If an assertion fails, already asserted resets are deasserted again.
 *
 * See also: reset_control_assert()
 */
int reset_control_bulk_assert(int num_rstcs,
			      struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_assert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_deassert(rstcs[i].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_assert);

/**
 * reset_control_deassert - deasserts the reset line
 * @rstc: reset controller
 *
 * After calling this function, the reset is guaranteed to be deasserted.
 * Consumers must not use reset_control_reset on shared reset lines when
 * reset_control_(de)assert has been used.
 *
 * If rstc is NULL it is an optional reset and the function will just
 * return 0.
 */
int reset_control_deassert(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_deassert(rstc_to_array(rstc));

	if (rstc->shared) {
		if (WARN_ON(atomic_read(&rstc->triggered_count) != 0))
			return -EINVAL;

		if (atomic_inc_return(&rstc->deassert_count) != 1)
			return 0;
	} else {
		if (!rstc->acquired) {
			WARN(1, "reset %s (ID: %u) is not acquired\n",
			     rcdev_name(rstc->rcdev), rstc->id);
			return -EPERM;
		}
	}

	/*
	 * If the reset controller does not implement .deassert(), we assume
	 * that it handles self-deasserting reset lines via .reset(). In that
	 * case, the reset lines are deasserted by default. If that is not the
	 * case, the reset controller driver should implement .deassert() and
	 * return -ENOTSUPP.
	 */
	if (!rstc->rcdev->ops->deassert)
		return 0;

	return rstc->rcdev->ops->deassert(rstc->rcdev, rstc->id);
}
EXPORT_SYMBOL_GPL(reset_control_deassert);
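
/*
 * Example (consumer sketch with hypothetical names): a driver sharing a bus
 * reset line with sibling IP blocks brackets hardware use with a
 * deassert/assert pair; the final state of the line depends on the other
 * users:
 *
 *	rstc = devm_reset_control_get_shared(&pdev->dev, "bus");
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_deassert(rstc);	// guaranteed deasserted now
 *	if (ret)
 *		return ret;
 *
 *	// ... use the hardware ...
 *
 *	reset_control_assert(rstc);	// may stay deasserted for other users
 */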

/**
 * reset_control_bulk_deassert - deasserts the reset lines in reverse order
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Deassert the reset lines for all provided reset controls, in reverse order.
 * If a deassertion fails, already deasserted resets are asserted again.
 *
 * See also: reset_control_deassert()
 */
int reset_control_bulk_deassert(int num_rstcs,
				struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = num_rstcs - 1; i >= 0; i--) {
		ret = reset_control_deassert(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i < num_rstcs)
		reset_control_assert(rstcs[i++].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_deassert);

/**
 * reset_control_status - returns a negative errno if not supported, a
 * positive value if the reset line is asserted, or zero if the reset
 * line is not asserted or if rstc is NULL (optional reset).
 * @rstc: reset controller
 */
int reset_control_status(struct reset_control *rstc)
{
	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)) || reset_control_is_array(rstc))
		return -EINVAL;

	if (rstc->rcdev->ops->status)
		return rstc->rcdev->ops->status(rstc->rcdev, rstc->id);

	return -ENOTSUPP;
}
EXPORT_SYMBOL_GPL(reset_control_status);

/**
 * reset_control_acquire() - acquires a reset control for exclusive use
 * @rstc: reset control
 *
 * This is used to explicitly acquire a reset control for exclusive use. Note
 * that exclusive resets are requested as acquired by default. In order for a
 * second consumer to be able to control the reset, the first consumer has to
 * release it first. Typically the easiest way to achieve this is to call
 * reset_control_get_exclusive_released() to obtain an instance of the reset
 * control. Such reset controls are not acquired by default.
 *
 * Consumers implementing shared access to an exclusive reset need to follow
 * a specific protocol in order to work together. Before consumers can change
 * a reset they must acquire exclusive access using reset_control_acquire().
 * After they are done operating the reset, they must release exclusive access
 * with a call to reset_control_release(). While one consumer holds exclusive
 * access to the reset, no other consumer is granted it until that access is
 * released.
 *
 * See also: reset_control_release()
 */
int reset_control_acquire(struct reset_control *rstc)
{
	struct reset_control *rc;

	if (!rstc)
		return 0;

	if (WARN_ON(IS_ERR(rstc)))
		return -EINVAL;

	if (reset_control_is_array(rstc))
		return reset_control_array_acquire(rstc_to_array(rstc));

	mutex_lock(&reset_list_mutex);

	if (rstc->acquired) {
		mutex_unlock(&reset_list_mutex);
		return 0;
	}

	list_for_each_entry(rc, &rstc->rcdev->reset_control_head, list) {
		if (rstc != rc && rstc->id == rc->id) {
			if (rc->acquired) {
				mutex_unlock(&reset_list_mutex);
				return -EBUSY;
			}
		}
	}

	rstc->acquired = true;

	mutex_unlock(&reset_list_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(reset_control_acquire);
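
/*
 * Example (sketch, assuming the devm_reset_control_get_exclusive_released()
 * wrapper from <linux/reset.h>): two cooperating consumers share an
 * exclusive line by requesting it in the released state and acquiring it
 * only around the actual reset operations:
 *
 *	rstc = devm_reset_control_get_exclusive_released(&pdev->dev, NULL);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_acquire(rstc);	// -EBUSY while the peer holds it
 *	if (ret)
 *		return ret;
 *
 *	ret = reset_control_assert(rstc);
 *	// ... reconfigure the hardware while it is held in reset ...
 *	ret = reset_control_deassert(rstc);
 *
 *	reset_control_release(rstc);	// let the other consumer acquire it
 */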

/**
 * reset_control_bulk_acquire - acquires reset controls for exclusive use
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * This is used to explicitly acquire reset controls requested with
 * reset_control_bulk_get_exclusive_released() for temporary exclusive use.
 *
 * See also: reset_control_acquire(), reset_control_bulk_release()
 */
int reset_control_bulk_acquire(int num_rstcs,
			       struct reset_control_bulk_data *rstcs)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		ret = reset_control_acquire(rstcs[i].rstc);
		if (ret)
			goto err;
	}

	return 0;

err:
	while (i--)
		reset_control_release(rstcs[i].rstc);
	return ret;
}
EXPORT_SYMBOL_GPL(reset_control_bulk_acquire);

/**
 * reset_control_release() - releases exclusive access to a reset control
 * @rstc: reset control
 *
 * Releases exclusive access right to a reset control previously obtained by a
 * call to reset_control_acquire(). Until a consumer calls this function, no
 * other consumers will be granted exclusive access.
 *
 * See also: reset_control_acquire()
 */
void reset_control_release(struct reset_control *rstc)
{
	if (!rstc || WARN_ON(IS_ERR(rstc)))
		return;

	if (reset_control_is_array(rstc))
		reset_control_array_release(rstc_to_array(rstc));
	else
		rstc->acquired = false;
}
EXPORT_SYMBOL_GPL(reset_control_release);

/**
 * reset_control_bulk_release() - releases exclusive access to reset controls
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 *
 * Releases exclusive access right to reset controls previously obtained by a
 * call to reset_control_bulk_acquire().
 *
 * See also: reset_control_release(), reset_control_bulk_acquire()
 */
void reset_control_bulk_release(int num_rstcs,
				struct reset_control_bulk_data *rstcs)
{
	int i;

	for (i = 0; i < num_rstcs; i++)
		reset_control_release(rstcs[i].rstc);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_release);

static struct reset_control *
__reset_control_get_internal(struct reset_controller_dev *rcdev,
			     unsigned int index, bool shared, bool acquired)
{
	struct reset_control *rstc;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rstc, &rcdev->reset_control_head, list) {
		if (rstc->id == index) {
			/*
			 * Allow creating a secondary exclusive reset_control
			 * that is initially not acquired for an already
			 * controlled reset line.
			 */
			if (!rstc->shared && !shared && !acquired)
				break;

			if (WARN_ON(!rstc->shared || !shared))
				return ERR_PTR(-EBUSY);

			kref_get(&rstc->refcnt);
			return rstc;
		}
	}

	rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
	if (!rstc)
		return ERR_PTR(-ENOMEM);

	if (!try_module_get(rcdev->owner)) {
		kfree(rstc);
		return ERR_PTR(-ENODEV);
	}

	rstc->rcdev = rcdev;
	list_add(&rstc->list, &rcdev->reset_control_head);
	rstc->id = index;
	kref_init(&rstc->refcnt);
	rstc->acquired = acquired;
	rstc->shared = shared;

	return rstc;
}

static void __reset_control_release(struct kref *kref)
{
	struct reset_control *rstc = container_of(kref, struct reset_control,
						  refcnt);

	lockdep_assert_held(&reset_list_mutex);

	module_put(rstc->rcdev->owner);

	list_del(&rstc->list);
	kfree(rstc);
}

static void __reset_control_put_internal(struct reset_control *rstc)
{
	lockdep_assert_held(&reset_list_mutex);

	kref_put(&rstc->refcnt, __reset_control_release);
}

struct reset_control *
__of_reset_control_get(struct device_node *node, const char *id, int index,
		       bool shared, bool optional, bool acquired)
{
	struct reset_control *rstc;
	struct reset_controller_dev *r, *rcdev;
	struct of_phandle_args args;
	int rstc_id;
	int ret;

	if (!node)
		return ERR_PTR(-EINVAL);

	if (id) {
		index = of_property_match_string(node,
						 "reset-names", id);
		if (index == -EILSEQ)
			return ERR_PTR(index);
		if (index < 0)
			return optional ? NULL : ERR_PTR(-ENOENT);
	}

	ret = of_parse_phandle_with_args(node, "resets", "#reset-cells",
					 index, &args);
	if (ret == -EINVAL)
		return ERR_PTR(ret);
	if (ret)
		return optional ? NULL : ERR_PTR(ret);

	mutex_lock(&reset_list_mutex);
	rcdev = NULL;
	list_for_each_entry(r, &reset_controller_list, list) {
		if (args.np == r->of_node) {
			rcdev = r;
			break;
		}
	}

	if (!rcdev) {
		rstc = ERR_PTR(-EPROBE_DEFER);
		goto out;
	}

	if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
		rstc = ERR_PTR(-EINVAL);
		goto out;
	}

	rstc_id = rcdev->of_xlate(rcdev, &args);
	if (rstc_id < 0) {
		rstc = ERR_PTR(rstc_id);
		goto out;
	}

	/* reset_list_mutex also protects the rcdev's reset_control list */
	rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);

out:
	mutex_unlock(&reset_list_mutex);
	of_node_put(args.np);

	return rstc;
}
EXPORT_SYMBOL_GPL(__of_reset_control_get);

static struct reset_controller_dev *
__reset_controller_by_name(const char *name)
{
	struct reset_controller_dev *rcdev;

	lockdep_assert_held(&reset_list_mutex);

	list_for_each_entry(rcdev, &reset_controller_list, list) {
		if (!rcdev->dev)
			continue;

		if (!strcmp(name, dev_name(rcdev->dev)))
			return rcdev;
	}

	return NULL;
}

static struct reset_control *
__reset_control_get_from_lookup(struct device *dev, const char *con_id,
				bool shared, bool optional, bool acquired)
{
	const struct reset_control_lookup *lookup;
	struct reset_controller_dev *rcdev;
	const char *dev_id = dev_name(dev);
	struct reset_control *rstc = NULL;

	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (strcmp(lookup->dev_id, dev_id))
			continue;

		if ((!con_id && !lookup->con_id) ||
		    ((con_id && lookup->con_id) &&
		     !strcmp(con_id, lookup->con_id))) {
			mutex_lock(&reset_list_mutex);
			rcdev = __reset_controller_by_name(lookup->provider);
			if (!rcdev) {
				mutex_unlock(&reset_list_mutex);
				mutex_unlock(&reset_lookup_mutex);
				/* Reset provider may not be ready yet. */
				return ERR_PTR(-EPROBE_DEFER);
			}

			rstc = __reset_control_get_internal(rcdev,
							    lookup->index,
							    shared, acquired);
			mutex_unlock(&reset_list_mutex);
			break;
		}
	}

	mutex_unlock(&reset_lookup_mutex);

	if (!rstc)
		return optional ? NULL : ERR_PTR(-ENOENT);

	return rstc;
}

struct reset_control *__reset_control_get(struct device *dev, const char *id,
					  int index, bool shared, bool optional,
					  bool acquired)
{
	if (WARN_ON(shared && acquired))
		return ERR_PTR(-EINVAL);

	if (dev->of_node)
		return __of_reset_control_get(dev->of_node, id, index, shared,
					      optional, acquired);

	return __reset_control_get_from_lookup(dev, id, shared, optional,
					       acquired);
}
EXPORT_SYMBOL_GPL(__reset_control_get);
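
/*
 * Note (informative): the consumer-facing getters in <linux/reset.h> are
 * thin inline wrappers that map onto the flag combinations of this function,
 * roughly:
 *
 *	// exclusive, mandatory, acquired from the start
 *	rstc = reset_control_get_exclusive(dev, "bus");
 *	// shared with other consumers, never acquired
 *	rstc = reset_control_get_shared(dev, "bus");
 *	// optional: returns NULL instead of -ENOENT if no reset is wired up
 *	rstc = reset_control_get_optional_exclusive(dev, "bus");
 */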

int __reset_control_bulk_get(struct device *dev, int num_rstcs,
			     struct reset_control_bulk_data *rstcs,
			     bool shared, bool optional, bool acquired)
{
	int ret, i;

	for (i = 0; i < num_rstcs; i++) {
		rstcs[i].rstc = __reset_control_get(dev, rstcs[i].id, 0,
						    shared, optional, acquired);
		if (IS_ERR(rstcs[i].rstc)) {
			ret = PTR_ERR(rstcs[i].rstc);
			goto err;
		}
	}

	return 0;

err:
	mutex_lock(&reset_list_mutex);
	while (i--)
		__reset_control_put_internal(rstcs[i].rstc);
	mutex_unlock(&reset_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(__reset_control_bulk_get);

static void reset_control_array_put(struct reset_control_array *resets)
{
	int i;

	mutex_lock(&reset_list_mutex);
	for (i = 0; i < resets->num_rstcs; i++)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);
	kfree(resets);
}

/**
 * reset_control_put - free the reset controller
 * @rstc: reset controller
 */
void reset_control_put(struct reset_control *rstc)
{
	if (IS_ERR_OR_NULL(rstc))
		return;

	if (reset_control_is_array(rstc)) {
		reset_control_array_put(rstc_to_array(rstc));
		return;
	}

	mutex_lock(&reset_list_mutex);
	__reset_control_put_internal(rstc);
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_put);

/**
 * reset_control_bulk_put - free the reset controllers
 * @num_rstcs: number of entries in rstcs array
 * @rstcs: array of struct reset_control_bulk_data with reset controls set
 */
void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs)
{
	mutex_lock(&reset_list_mutex);
	while (num_rstcs--) {
		if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc))
			continue;
		__reset_control_put_internal(rstcs[num_rstcs].rstc);
	}
	mutex_unlock(&reset_list_mutex);
}
EXPORT_SYMBOL_GPL(reset_control_bulk_put);

static void devm_reset_control_release(struct device *dev, void *res)
{
	reset_control_put(*(struct reset_control **)res);
}

struct reset_control *
__devm_reset_control_get(struct device *dev, const char *id, int index,
			 bool shared, bool optional, bool acquired)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = __reset_control_get(dev, id, index, shared, optional, acquired);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_get);

struct reset_control_bulk_devres {
	int num_rstcs;
	struct reset_control_bulk_data *rstcs;
};

static void devm_reset_control_bulk_release(struct device *dev, void *res)
{
	struct reset_control_bulk_devres *devres = res;

	reset_control_bulk_put(devres->num_rstcs, devres->rstcs);
}

int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs,
				  struct reset_control_bulk_data *rstcs,
				  bool shared, bool optional, bool acquired)
{
	struct reset_control_bulk_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_reset_control_bulk_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = __reset_control_bulk_get(dev, num_rstcs, rstcs, shared, optional, acquired);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	ptr->num_rstcs = num_rstcs;
	ptr->rstcs = rstcs;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);

/**
 * __device_reset - find reset controller associated with the device
 *                  and perform reset
 * @dev: device to be reset by the controller
 * @optional: whether it is optional to reset the device
 *
 * Convenience wrapper for __reset_control_get() and reset_control_reset().
 * This is useful for the common case of devices with single, dedicated reset
 * lines.
 */
int __device_reset(struct device *dev, bool optional)
{
	struct reset_control *rstc;
	int ret;

	rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_reset(rstc);

	reset_control_put(rstc);

	return ret;
}
EXPORT_SYMBOL_GPL(__device_reset);
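
/*
 * Example (sketch): the device_reset() and device_reset_optional() wrappers
 * in <linux/reset.h> resolve to this helper, so a driver with a single
 * dedicated reset line can simply do:
 *
 *	ret = device_reset(&pdev->dev);
 *	if (ret)
 *		return dev_err_probe(&pdev->dev, ret, "failed to reset\n");
 */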

/*
 * APIs to manage an array of reset controls.
 */

/**
 * of_reset_control_get_count - Count number of resets available with a device
 *
 * @node: device node that contains 'resets'.
 *
 * Returns the positive number of resets on success, or a negative error
 * number on failure or if the count is zero.
 */
static int of_reset_control_get_count(struct device_node *node)
{
	int count;

	if (!node)
		return -EINVAL;

	count = of_count_phandle_with_args(node, "resets", "#reset-cells");
	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * of_reset_control_array_get - Get a list of reset controls using
 *                              device node.
 *
 * @np: device node for the device that requests the reset controls array
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 * @acquired: only one reset control may be acquired for a given controller
 *            and ID
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
			   bool acquired)
{
	struct reset_control_array *resets;
	struct reset_control *rstc;
	int num, i;

	num = of_reset_control_get_count(np);
	if (num < 0)
		return optional ? NULL : ERR_PTR(num);

	resets = kzalloc(struct_size(resets, rstc, num), GFP_KERNEL);
	if (!resets)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < num; i++) {
		rstc = __of_reset_control_get(np, NULL, i, shared, optional,
					      acquired);
		if (IS_ERR(rstc))
			goto err_rst;
		resets->rstc[i] = rstc;
	}
	resets->num_rstcs = num;
	resets->base.array = true;

	return &resets->base;

err_rst:
	mutex_lock(&reset_list_mutex);
	while (--i >= 0)
		__reset_control_put_internal(resets->rstc[i]);
	mutex_unlock(&reset_list_mutex);

	kfree(resets);

	return rstc;
}
EXPORT_SYMBOL_GPL(of_reset_control_array_get);

/**
 * devm_reset_control_array_get - Resource managed reset control array get
 *
 * @dev: device that requests the list of reset controls
 * @shared: whether reset controls are shared or not
 * @optional: whether it is optional to get the reset controls
 *
 * The reset control array APIs are intended for a list of resets
 * that just have to be asserted or deasserted, without any
 * requirements on the order.
 *
 * Returns pointer to allocated reset_control on success or error on failure
 */
struct reset_control *
devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
{
	struct reset_control **ptr, *rstc;

	ptr = devres_alloc(devm_reset_control_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	rstc = of_reset_control_array_get(dev->of_node, shared, optional, true);
	if (IS_ERR_OR_NULL(rstc)) {
		devres_free(ptr);
		return rstc;
	}

	*ptr = rstc;
	devres_add(dev, ptr);

	return rstc;
}
EXPORT_SYMBOL_GPL(devm_reset_control_array_get);
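
/*
 * Example (sketch, assuming the devm_reset_control_array_get_exclusive()
 * wrapper from <linux/reset.h>): a driver that only needs to deassert an
 * unordered set of resets can treat the whole "resets" property as a single
 * handle:
 *
 *	rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
 *	if (IS_ERR(rstc))
 *		return PTR_ERR(rstc);
 *
 *	ret = reset_control_deassert(rstc);	// deasserts every line in the array
 */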

static int reset_control_get_count_from_lookup(struct device *dev)
{
	const struct reset_control_lookup *lookup;
	const char *dev_id;
	int count = 0;

	if (!dev)
		return -EINVAL;

	dev_id = dev_name(dev);
	mutex_lock(&reset_lookup_mutex);

	list_for_each_entry(lookup, &reset_lookup_list, list) {
		if (!strcmp(lookup->dev_id, dev_id))
			count++;
	}

	mutex_unlock(&reset_lookup_mutex);

	if (count == 0)
		count = -ENOENT;

	return count;
}

/**
 * reset_control_get_count - Count number of resets available with a device
 *
 * @dev: device for which to return the number of resets
 *
 * Returns the positive number of resets on success, or a negative error
 * number on failure or if the count is zero.
 */
int reset_control_get_count(struct device *dev)
{
	if (dev->of_node)
		return of_reset_control_get_count(dev->of_node);

	return reset_control_get_count_from_lookup(dev);
}
EXPORT_SYMBOL_GPL(reset_control_get_count);