// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of fifo is very
 * similar, but some differences exist.
 * Those differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE),

	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_MIN_DEPTH,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

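/*
 * Helpers to get the frontend cpu DAI, the fifo driver data and the fifo
 * device from a pcm substream
 */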
static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return rtd->cpu_dai;
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

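/* Start or stop the fifo DMA transfer */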
static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}

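/* Enable or disable the DMA according to the ALSA trigger command */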
static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

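/*
 * Report the position within the DMA buffer. STATUS2 is set up in open()
 * to report the current memory pointer, so subtracting the buffer base
 * address gives the offset in bytes.
 */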
static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}

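/*
 * Program the DMA buffer boundaries and make the block count interrupt
 * fire once per period
 */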
static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
				  struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	dma_addr_t end_ptr;
	unsigned int burst_num;
	int ret;

	ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	/* Setup dma memory pointers */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Setup interrupt periodicity */
	burst_num = params_period_bytes(params) / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/* Enable block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT));

	return 0;
}

static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable the block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);

	return snd_pcm_lib_free_pages(ss);
}

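/* Acknowledge the selected fifo interrupts by pulsing the clear bits */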
static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   CTRL1_INT_CLR(mask));

	/* The clear bits must also be cleared */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   0);
}

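/* Block count irq handler: report the elapsed period to ALSA */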
static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);

	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
	if (status & FIFO_INT_COUNT_REPEAT)
		snd_pcm_period_elapsed(ss);
	else
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	/* Ack irqs */
	axg_fifo_ack_irq(fifo, status);

	return IRQ_RETVAL(status);
}

static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period sizes are multiples of the FIFO
	 * minimum depth
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
			  dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		return ret;

	/* Setup status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL_MASK,
			   CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_MASK), 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take the memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		clk_disable_unprepare(fifo->pclk);

	return ret;
}

static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* Free the irq */
	free_irq(fifo->irq, ss);

	return ret;
}

const struct snd_pcm_ops axg_fifo_pcm_ops = {
	.open =		axg_fifo_pcm_open,
	.close =	axg_fifo_pcm_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	axg_fifo_pcm_hw_params,
	.hw_free =	axg_fifo_pcm_hw_free,
	.pointer =	axg_fifo_pcm_pointer,
	.trigger =	axg_fifo_pcm_trigger,
};
EXPORT_SYMBOL_GPL(axg_fifo_pcm_ops);

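/* Preallocate the DMA buffer for the substream of the requested stream type */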
int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

	return snd_pcm_lib_preallocate_pages(rtd->pcm->streams[type].substream,
					     SNDRV_DMA_TYPE_DEV, card->dev,
					     size, size);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);

static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
	.max_register	= FIFO_STATUS2,
};

int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	struct resource *res;
	void __iomem *regs;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk)) {
		if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER)
			dev_err(dev, "failed to get pclk: %ld\n",
				PTR_ERR(fifo->pclk));
		return PTR_ERR(fifo->pclk);
	}

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb)) {
		if (PTR_ERR(fifo->arb) != -EPROBE_DEFER)
			dev_err(dev, "failed to get arb reset: %ld\n",
				PTR_ERR(fifo->arb));
		return PTR_ERR(fifo->arb);
	}

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		return fifo->irq;
	}

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");