// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl>
 * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com>
 */

#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/irq_work.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

struct irq_sim_work_ctx {
        struct irq_work         work;
        int                     irq_base;
        unsigned int            irq_count;
        unsigned long           *pending;
        struct irq_domain       *domain;
};

struct irq_sim_irq_ctx {
        int                     irqnum;
        bool                    enabled;
        struct irq_sim_work_ctx *work_ctx;
};

struct irq_sim_devres {
        struct irq_domain       *domain;
};

static void irq_sim_irqmask(struct irq_data *data)
{
        struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

        irq_ctx->enabled = false;
}

static void irq_sim_irqunmask(struct irq_data *data)
{
        struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

        irq_ctx->enabled = true;
}

static int irq_sim_set_type(struct irq_data *data, unsigned int type)
{
        /* We only support rising and falling edge trigger types. */
        if (type & ~IRQ_TYPE_EDGE_BOTH)
                return -EINVAL;

        irqd_set_trigger_type(data, type);

        return 0;
}

static int irq_sim_get_irqchip_state(struct irq_data *data,
                                     enum irqchip_irq_state which, bool *state)
{
        struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
        irq_hw_number_t hwirq = irqd_to_hwirq(data);

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                if (irq_ctx->enabled)
                        *state = test_bit(hwirq, irq_ctx->work_ctx->pending);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int irq_sim_set_irqchip_state(struct irq_data *data,
                                     enum irqchip_irq_state which, bool state)
{
        struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);
        irq_hw_number_t hwirq = irqd_to_hwirq(data);

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                if (irq_ctx->enabled) {
                        assign_bit(hwirq, irq_ctx->work_ctx->pending, state);
                        if (state)
                                irq_work_queue(&irq_ctx->work_ctx->work);
                }
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static struct irq_chip irq_sim_irqchip = {
        .name                   = "irq_sim",
        .irq_mask               = irq_sim_irqmask,
        .irq_unmask             = irq_sim_irqunmask,
        .irq_set_type           = irq_sim_set_type,
        .irq_get_irqchip_state  = irq_sim_get_irqchip_state,
        .irq_set_irqchip_state  = irq_sim_set_irqchip_state,
};

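/*
 * Usage note (illustrative only, not part of this file's API): consumers
 * never call into this irqchip directly. A simulated interrupt is raised
 * through the generic helper, e.g.
 *
 *      irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
 *
 * which ends up in irq_sim_set_irqchip_state() above, marks the line as
 * pending and queues the irq_work that later delivers it from
 * irq_sim_handle_irq().
 */
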
/*
 * irq_work callback: deliver every interrupt marked pending since the last
 * run, clearing each pending bit before invoking its handler.
 */
static void irq_sim_handle_irq(struct irq_work *work)
{
        struct irq_sim_work_ctx *work_ctx;
        unsigned int offset = 0;
        int irqnum;

        work_ctx = container_of(work, struct irq_sim_work_ctx, work);

        while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) {
                offset = find_next_bit(work_ctx->pending,
                                       work_ctx->irq_count, offset);
                clear_bit(offset, work_ctx->pending);
                irqnum = irq_find_mapping(work_ctx->domain, offset);
                handle_simple_irq(irq_to_desc(irqnum));
        }
}

static int irq_sim_domain_map(struct irq_domain *domain,
                              unsigned int virq, irq_hw_number_t hw)
{
        struct irq_sim_work_ctx *work_ctx = domain->host_data;
        struct irq_sim_irq_ctx *irq_ctx;

        irq_ctx = kzalloc(sizeof(*irq_ctx), GFP_KERNEL);
        if (!irq_ctx)
                return -ENOMEM;

        irq_set_chip(virq, &irq_sim_irqchip);
        irq_set_chip_data(virq, irq_ctx);
        irq_set_handler(virq, handle_simple_irq);
        irq_modify_status(virq, IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
        irq_ctx->work_ctx = work_ctx;

        return 0;
}

static void irq_sim_domain_unmap(struct irq_domain *domain, unsigned int virq)
{
        struct irq_sim_irq_ctx *irq_ctx;
        struct irq_data *irqd;

        irqd = irq_domain_get_irq_data(domain, virq);
        irq_ctx = irq_data_get_irq_chip_data(irqd);

        irq_set_handler(virq, NULL);
        irq_domain_reset_irq_data(irqd);
        kfree(irq_ctx);
}

static const struct irq_domain_ops irq_sim_domain_ops = {
        .map            = irq_sim_domain_map,
        .unmap          = irq_sim_domain_unmap,
};

/**
 * irq_domain_create_sim - Create a new interrupt simulator irq_domain and
 *                         allocate a range of dummy interrupts.
 *
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate.
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
                                         unsigned int num_irqs)
{
        struct irq_sim_work_ctx *work_ctx;

        work_ctx = kmalloc(sizeof(*work_ctx), GFP_KERNEL);
        if (!work_ctx)
                goto err_out;

        work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
        if (!work_ctx->pending)
                goto err_free_work_ctx;

        work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs,
                                                    &irq_sim_domain_ops,
                                                    work_ctx);
        if (!work_ctx->domain)
                goto err_free_bitmap;

        work_ctx->irq_count = num_irqs;
        init_irq_work(&work_ctx->work, irq_sim_handle_irq);

        return work_ctx->domain;

err_free_bitmap:
        bitmap_free(work_ctx->pending);
err_free_work_ctx:
        kfree(work_ctx);
err_out:
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(irq_domain_create_sim);
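
/*
 * Example (a sketch only, not taken from an in-tree user): a test module
 * could create a simulator with a handful of lines, request one of them and
 * trigger it from process context. The handler name, interrupt count and
 * "irq-sim-test" label below are placeholders.
 *
 *      static irqreturn_t test_handler(int irq, void *data)
 *      {
 *              return IRQ_HANDLED;
 *      }
 *
 *      struct irq_domain *domain;
 *      int virq, ret;
 *
 *      domain = irq_domain_create_sim(NULL, 4);
 *      if (IS_ERR(domain))
 *              return PTR_ERR(domain);
 *
 *      virq = irq_create_mapping(domain, 0);
 *      ret = request_irq(virq, test_handler, 0, "irq-sim-test", NULL);
 *      if (ret)
 *              return ret;
 *
 *      irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, true);
 */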

/**
 * irq_domain_remove_sim - Deinitialize the interrupt simulator domain: free
 *                         the interrupt descriptors and allocated memory.
 *
 * @domain:     The interrupt simulator domain to tear down.
 */
void irq_domain_remove_sim(struct irq_domain *domain)
{
        struct irq_sim_work_ctx *work_ctx = domain->host_data;

        irq_work_sync(&work_ctx->work);
        bitmap_free(work_ctx->pending);
        kfree(work_ctx);

        irq_domain_remove(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_sim);
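
/*
 * Teardown sketch matching the example above (illustrative only): release
 * the requested interrupt and dispose of its mapping before removing the
 * simulator domain.
 *
 *      free_irq(virq, NULL);
 *      irq_dispose_mapping(virq);
 *      irq_domain_remove_sim(domain);
 */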

static void devm_irq_domain_release_sim(struct device *dev, void *res)
{
        struct irq_sim_devres *this = res;

        irq_domain_remove_sim(this->domain);
}

/**
 * devm_irq_domain_create_sim - Create a new interrupt simulator for
 *                              a managed device.
 *
 * @dev:        Device to initialize the simulator object for.
 * @fwnode:     struct fwnode_handle to be associated with this domain.
 * @num_irqs:   Number of interrupts to allocate.
 *
 * On success: return a new irq_domain object.
 * On failure: a negative errno wrapped with ERR_PTR().
 */
struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
                                              struct fwnode_handle *fwnode,
                                              unsigned int num_irqs)
{
        struct irq_sim_devres *dr;

        dr = devres_alloc(devm_irq_domain_release_sim,
                          sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return ERR_PTR(-ENOMEM);

        dr->domain = irq_domain_create_sim(fwnode, num_irqs);
        if (IS_ERR(dr->domain)) {
                devres_free(dr);
                return dr->domain;
        }

        devres_add(dev, dr);
        return dr->domain;
}
EXPORT_SYMBOL_GPL(devm_irq_domain_create_sim);
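
/*
 * Probe-time sketch (hypothetical driver, names are placeholders): with the
 * devres variant no explicit cleanup call is needed; the domain is torn down
 * automatically when the device is detached.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct irq_domain *domain;
 *
 *              domain = devm_irq_domain_create_sim(&pdev->dev,
 *                                                  dev_fwnode(&pdev->dev), 8);
 *              if (IS_ERR(domain))
 *                      return PTR_ERR(domain);
 *
 *              return 0;
 *      }
 */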