1 // SPDX-License-Identifier: GPL-2.0-only
2 //
3 // Copyright(c) 2021-2022 Intel Corporation. All rights reserved.
4 //
5 // Authors: Cezary Rojewski <cezary.rojewski@intel.com>
6 // Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>
7 //
8 // Special thanks to:
9 // Krzysztof Hejmowski <krzysztof.hejmowski@intel.com>
10 // Michal Sienkiewicz <michal.sienkiewicz@intel.com>
11 // Filip Proborszcz
12 //
13 // for sharing Intel AudioDSP expertise and helping shape the very
14 // foundation of this driver
15 //
16
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <sound/hda_codec.h>
20 #include <sound/hda_i915.h>
21 #include <sound/hda_register.h>
22 #include <sound/hdaudio.h>
23 #include <sound/hdaudio_ext.h>
24 #include <sound/intel-dsp-config.h>
25 #include <sound/intel-nhlt.h>
26 #include "../../codecs/hda.h"
27 #include "avs.h"
28 #include "cldma.h"
29
30 static void
avs_hda_update_config_dword(struct hdac_bus * bus,u32 reg,u32 mask,u32 value)31 avs_hda_update_config_dword(struct hdac_bus *bus, u32 reg, u32 mask, u32 value)
32 {
33 struct pci_dev *pci = to_pci_dev(bus->dev);
34 u32 data;
35
36 pci_read_config_dword(pci, reg, &data);
37 data &= ~mask;
38 data |= (value & mask);
39 pci_write_config_dword(pci, reg, data);
40 }
41
/*
 * Toggle low-power SRAM retention through the PGCTL config register.
 * Note the inversion: LSRMD set means retention is *disabled*.
 */
void avs_hda_power_gating_enable(struct avs_dev *adev, bool enable)
{
	u32 value = enable ? 0 : AZX_PGCTL_LSRMD_MASK;

	avs_hda_update_config_dword(&adev->base.core, AZX_PCIREG_PGCTL,
				    AZX_PGCTL_LSRMD_MASK, value);
}
50
/* Toggle miscellaneous backbone dynamic clock gating (MISCBDCGE). */
static void avs_hdac_clock_gating_enable(struct hdac_bus *bus, bool enable)
{
	u32 value = enable ? AZX_CGCTL_MISCBDCGE_MASK : 0;

	avs_hda_update_config_dword(bus, AZX_PCIREG_CGCTL, AZX_CGCTL_MISCBDCGE_MASK, value);
}
58
/* Public wrapper: toggle controller clock gating for an AVS device. */
void avs_hda_clock_gating_enable(struct avs_dev *adev, bool enable)
{
	avs_hdac_clock_gating_enable(&adev->base.core, enable);
}
63
/* Toggle L1SEN (PCIe L1 entry enable) in the vendor-specific EM2 register. */
void avs_hda_l1sen_enable(struct avs_dev *adev, bool enable)
{
	u32 value = enable ? AZX_VS_EM2_L1SEN : 0;

	snd_hdac_chip_updatel(&adev->base.core, VS_EM2, AZX_VS_EM2_L1SEN, value);
}
71
avs_hdac_bus_init_streams(struct hdac_bus * bus)72 static int avs_hdac_bus_init_streams(struct hdac_bus *bus)
73 {
74 unsigned int cp_streams, pb_streams;
75 unsigned int gcap;
76
77 gcap = snd_hdac_chip_readw(bus, GCAP);
78 cp_streams = (gcap >> 8) & 0x0F;
79 pb_streams = (gcap >> 12) & 0x0F;
80 bus->num_streams = cp_streams + pb_streams;
81
82 snd_hdac_ext_stream_init_all(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
83 snd_hdac_ext_stream_init_all(bus, cp_streams, pb_streams, SNDRV_PCM_STREAM_PLAYBACK);
84
85 return snd_hdac_bus_alloc_stream_pages(bus);
86 }
87
/*
 * avs_hdac_bus_init_chip - bring the HDAudio link controller up.
 * @bus: HDAudio bus to initialize
 * @full_reset: whether to pull the controller through a full link reset
 *
 * Clock gating is disabled around the reset sequence, stale stream-to-link
 * mappings are cleared, and the vendor-specific DUM workaround bit is set.
 * Returns the success value reported by snd_hdac_bus_init_chip().
 */
static bool avs_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
{
	struct hdac_ext_link *hlink;
	bool ret;

	/* CG must be off while the controller transitions through reset. */
	avs_hdac_clock_gating_enable(bus, false);
	ret = snd_hdac_bus_init_chip(bus, full_reset);

	/* Reset stream-to-link mapping */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		writel(0, hlink->ml_addr + AZX_REG_ML_LOSIDV);

	avs_hdac_clock_gating_enable(bus, true);

	/* Set DUM bit to address incorrect position reporting for capture
	 * streams. In order to do so, CTRL needs to be out of reset state
	 */
	snd_hdac_chip_updatel(bus, VS_EM2, AZX_VS_EM2_DUM, AZX_VS_EM2_DUM);

	return ret;
}
109
/*
 * probe_codec - check for a codec at @addr and instantiate its device.
 * @bus: HDAudio bus to probe on
 * @addr: codec address (0..HDA_MAX_CODECS-1)
 *
 * Sends a GET_PARAMETER(VENDOR_ID) verb to the codec's root node; no valid
 * response means nothing lives at that address. On success the codec device
 * is created and configured (which registers an ASoC component).
 *
 * Returns 0 on success, -EIO when the address does not respond, or a
 * negative error code from device init/configure.
 */
static int probe_codec(struct hdac_bus *bus, int addr)
{
	struct hda_codec *codec;
	/* verb: read the VENDOR_ID parameter from the root node of @addr */
	unsigned int cmd = (addr << 28) | (AC_NODE_ROOT << 20) |
			   (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
	unsigned int res = -1; /* UINT_MAX sentinel; unchanged if no response */
	int ret;

	mutex_lock(&bus->cmd_mutex);
	snd_hdac_bus_send_cmd(bus, cmd);
	snd_hdac_bus_get_response(bus, addr, &res);
	mutex_unlock(&bus->cmd_mutex);
	if (res == -1)
		return -EIO;

	dev_dbg(bus->dev, "codec #%d probed OK: 0x%x\n", addr, res);

	codec = snd_hda_codec_device_init(to_hda_bus(bus), addr, "hdaudioB%dD%d", bus->idx, addr);
	if (IS_ERR(codec)) {
		dev_err(bus->dev, "init codec failed: %ld\n", PTR_ERR(codec));
		return PTR_ERR(codec);
	}
	/*
	 * Allow avs_core suspend by forcing suspended state on all
	 * of its codec child devices. Component interested in
	 * dealing with hda codecs directly takes pm responsibilities
	 */
	pm_runtime_set_suspended(hda_codec_dev(codec));

	/* configure effectively creates new ASoC component */
	ret = snd_hda_codec_configure(codec);
	if (ret < 0) {
		/*
		 * NOTE(review): the codec device created above is not torn
		 * down here; presumably it is released later through
		 * snd_hda_codec_unregister() in avs_pci_remove() — confirm.
		 */
		dev_err(bus->dev, "failed to config codec %d\n", ret);
		return ret;
	}

	return 0;
}
148
avs_hdac_bus_probe_codecs(struct hdac_bus * bus)149 static void avs_hdac_bus_probe_codecs(struct hdac_bus *bus)
150 {
151 int c;
152
153 /* First try to probe all given codec slots */
154 for (c = 0; c < HDA_MAX_CODECS; c++) {
155 if (!(bus->codec_mask & BIT(c)))
156 continue;
157
158 if (!probe_codec(bus, c))
159 /* success, continue probing */
160 continue;
161
162 /*
163 * Some BIOSen give you wrong codec addresses
164 * that don't exist
165 */
166 dev_warn(bus->dev, "Codec #%d probe error; disabling it...\n", c);
167 bus->codec_mask &= ~BIT(c);
168 /*
169 * More badly, accessing to a non-existing
170 * codec often screws up the controller bus,
171 * and disturbs the further communications.
172 * Thus if an error occurs during probing,
173 * better to reset the controller bus to get
174 * back to the sanity state.
175 */
176 snd_hdac_bus_stop_chip(bus);
177 avs_hdac_bus_init_chip(bus, true);
178 }
179 }
180
/*
 * avs_hda_probe_work - deferred device bring-up.
 *
 * Runs from the workqueue scheduled at the end of avs_pci_probe(). Order
 * matters: controller init, codec probing, DSP firmware boot, NHLT lookup
 * and board registration, then runtime-PM hand-off.
 */
static void avs_hda_probe_work(struct work_struct *work)
{
	struct avs_dev *adev = container_of(work, struct avs_dev, probe_work);
	struct hdac_bus *bus = &adev->base.core;
	struct hdac_ext_link *hlink;
	int ret;

	pm_runtime_set_active(bus->dev); /* clear runtime_error flag */

	/* i915 binding is best-effort; carry on without display audio. */
	ret = snd_hdac_i915_init(bus);
	if (ret < 0)
		dev_info(bus->dev, "i915 init unsuccessful: %d\n", ret);

	/* Display power must be up while the codecs are being probed. */
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
	avs_hdac_bus_init_chip(bus, true);
	avs_hdac_bus_probe_codecs(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	/* with all codecs probed, links can be powered down */
	list_for_each_entry(hlink, &bus->hlink_list, list)
		snd_hdac_ext_bus_link_put(bus, hlink);

	snd_hdac_ext_bus_ppcap_enable(bus, true);
	snd_hdac_ext_bus_ppcap_int_enable(bus, true);

	/* Without firmware there is nothing more to set up. */
	ret = avs_dsp_first_boot_firmware(adev);
	if (ret < 0)
		return;

	adev->nhlt = intel_nhlt_init(adev->dev);
	if (!adev->nhlt)
		dev_info(bus->dev, "platform has no NHLT\n");

	avs_register_all_boards(adev);

	/* configure PM */
	pm_runtime_set_autosuspend_delay(bus->dev, 2000);
	pm_runtime_use_autosuspend(bus->dev);
	pm_runtime_mark_last_busy(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);
	pm_runtime_allow(bus->dev);
}
223
/*
 * Advance stream->curr_pos by the number of bytes DMA'd since the last
 * update, derived from the hardware position buffer. Handles wrap-around
 * of the ring buffer of size @buffer_size.
 */
static void hdac_stream_update_pos(struct hdac_stream *stream, u64 buffer_size)
{
	u64 prev_pos, pos, delta;

	div64_u64_rem(stream->curr_pos, buffer_size, &prev_pos);
	pos = snd_hdac_stream_get_pos_posbuf(stream);

	/* A lower hardware position means the DMA pointer wrapped. */
	delta = (pos < prev_pos) ? (buffer_size - prev_pos) + pos
				 : pos - prev_pos;

	stream->curr_pos += delta;
}
238
239 /* called from IRQ */
hdac_update_stream(struct hdac_bus * bus,struct hdac_stream * stream)240 static void hdac_update_stream(struct hdac_bus *bus, struct hdac_stream *stream)
241 {
242 if (stream->substream) {
243 snd_pcm_period_elapsed(stream->substream);
244 } else if (stream->cstream) {
245 u64 buffer_size = stream->cstream->runtime->buffer_size;
246
247 hdac_stream_update_pos(stream, buffer_size);
248 snd_compr_fragment_elapsed(stream->cstream);
249 }
250 }
251
/*
 * hdac_bus_irq_handler - top half of the HDAudio stream/RIRB interrupt.
 *
 * Acknowledges RIRB interrupts inline and, when stream interrupts are
 * pending, masks them in INTCTL and defers the servicing to
 * hdac_bus_irq_thread() by returning IRQ_WAKE_THREAD.
 */
static irqreturn_t hdac_bus_irq_handler(int irq, void *context)
{
	struct hdac_bus *bus = context;
	u32 mask, int_enable;
	u32 status;
	int ret = IRQ_NONE;

	/* Device may be powered down; its registers are not accessible then. */
	if (!pm_runtime_active(bus->dev))
		return ret;

	spin_lock(&bus->reg_lock);

	/* UINT_MAX (all ones) indicates the device dropped off the bus. */
	status = snd_hdac_chip_readl(bus, INTSTS);
	if (status == 0 || status == UINT_MAX) {
		spin_unlock(&bus->reg_lock);
		return ret;
	}

	/* clear rirb int */
	status = snd_hdac_chip_readb(bus, RIRBSTS);
	if (status & RIRB_INT_MASK) {
		if (status & RIRB_INT_RESPONSE)
			snd_hdac_bus_update_rirb(bus);
		snd_hdac_chip_writeb(bus, RIRBSTS, RIRB_INT_MASK);
	}

	/* One INTSTS/INTCTL bit per stream. */
	mask = (0x1 << bus->num_streams) - 1;

	status = snd_hdac_chip_readl(bus, INTSTS);
	status &= mask;
	if (status) {
		/* Disable stream interrupts; Re-enable in bottom half */
		int_enable = snd_hdac_chip_readl(bus, INTCTL);
		snd_hdac_chip_writel(bus, INTCTL, (int_enable & (~mask)));
		ret = IRQ_WAKE_THREAD;
	} else {
		ret = IRQ_HANDLED;
	}

	spin_unlock(&bus->reg_lock);
	return ret;
}
294
hdac_bus_irq_thread(int irq,void * context)295 static irqreturn_t hdac_bus_irq_thread(int irq, void *context)
296 {
297 struct hdac_bus *bus = context;
298 u32 status;
299 u32 int_enable;
300 u32 mask;
301 unsigned long flags;
302
303 status = snd_hdac_chip_readl(bus, INTSTS);
304
305 snd_hdac_bus_handle_stream_irq(bus, status, hdac_update_stream);
306
307 /* Re-enable stream interrupts */
308 mask = (0x1 << bus->num_streams) - 1;
309 spin_lock_irqsave(&bus->reg_lock, flags);
310 int_enable = snd_hdac_chip_readl(bus, INTCTL);
311 snd_hdac_chip_writel(bus, INTCTL, (int_enable | mask));
312 spin_unlock_irqrestore(&bus->reg_lock, flags);
313
314 return IRQ_HANDLED;
315 }
316
avs_hdac_acquire_irq(struct avs_dev * adev)317 static int avs_hdac_acquire_irq(struct avs_dev *adev)
318 {
319 struct hdac_bus *bus = &adev->base.core;
320 struct pci_dev *pci = to_pci_dev(bus->dev);
321 int ret;
322
323 /* request one and check that we only got one interrupt */
324 ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
325 if (ret != 1) {
326 dev_err(adev->dev, "Failed to allocate IRQ vector: %d\n", ret);
327 return ret;
328 }
329
330 ret = pci_request_irq(pci, 0, hdac_bus_irq_handler, hdac_bus_irq_thread, bus,
331 KBUILD_MODNAME);
332 if (ret < 0) {
333 dev_err(adev->dev, "Failed to request stream IRQ handler: %d\n", ret);
334 goto free_vector;
335 }
336
337 ret = pci_request_irq(pci, 0, avs_dsp_irq_handler, avs_dsp_irq_thread, adev,
338 KBUILD_MODNAME);
339 if (ret < 0) {
340 dev_err(adev->dev, "Failed to request IPC IRQ handler: %d\n", ret);
341 goto free_stream_irq;
342 }
343
344 return 0;
345
346 free_stream_irq:
347 pci_free_irq(pci, 0, bus);
348 free_vector:
349 pci_free_irq_vectors(pci);
350 return ret;
351 }
352
avs_bus_init(struct avs_dev * adev,struct pci_dev * pci,const struct pci_device_id * id)353 static int avs_bus_init(struct avs_dev *adev, struct pci_dev *pci, const struct pci_device_id *id)
354 {
355 struct hda_bus *bus = &adev->base;
356 struct avs_ipc *ipc;
357 struct device *dev = &pci->dev;
358 int ret;
359
360 ret = snd_hdac_ext_bus_init(&bus->core, dev, NULL, &soc_hda_ext_bus_ops);
361 if (ret < 0)
362 return ret;
363
364 bus->core.use_posbuf = 1;
365 bus->core.bdl_pos_adj = 0;
366 bus->core.sync_write = 1;
367 bus->pci = pci;
368 bus->mixer_assigned = -1;
369 mutex_init(&bus->prepare_mutex);
370
371 ipc = devm_kzalloc(dev, sizeof(*ipc), GFP_KERNEL);
372 if (!ipc)
373 return -ENOMEM;
374 ret = avs_ipc_init(ipc, dev);
375 if (ret < 0)
376 return ret;
377
378 adev->dev = dev;
379 adev->spec = (const struct avs_spec *)id->driver_data;
380 adev->ipc = ipc;
381 adev->hw_cfg.dsp_cores = hweight_long(AVS_MAIN_CORE_MASK);
382 INIT_WORK(&adev->probe_work, avs_hda_probe_work);
383 INIT_LIST_HEAD(&adev->comp_list);
384 INIT_LIST_HEAD(&adev->path_list);
385 INIT_LIST_HEAD(&adev->fw_list);
386 init_completion(&adev->fw_ready);
387 spin_lock_init(&adev->path_list_lock);
388 mutex_init(&adev->modres_mutex);
389 mutex_init(&adev->comp_list_mutex);
390 mutex_init(&adev->path_mutex);
391
392 return 0;
393 }
394
avs_pci_probe(struct pci_dev * pci,const struct pci_device_id * id)395 static int avs_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
396 {
397 struct hdac_bus *bus;
398 struct avs_dev *adev;
399 struct device *dev = &pci->dev;
400 int ret;
401
402 ret = snd_intel_dsp_driver_probe(pci);
403 if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_AVS)
404 return -ENODEV;
405
406 ret = pcim_enable_device(pci);
407 if (ret < 0)
408 return ret;
409
410 adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
411 if (!adev)
412 return -ENOMEM;
413 ret = avs_bus_init(adev, pci, id);
414 if (ret < 0) {
415 dev_err(dev, "failed to init avs bus: %d\n", ret);
416 return ret;
417 }
418
419 ret = pci_request_regions(pci, "AVS HDAudio");
420 if (ret < 0)
421 return ret;
422
423 bus = &adev->base.core;
424 bus->addr = pci_resource_start(pci, 0);
425 bus->remap_addr = pci_ioremap_bar(pci, 0);
426 if (!bus->remap_addr) {
427 dev_err(bus->dev, "ioremap error\n");
428 ret = -ENXIO;
429 goto err_remap_bar0;
430 }
431
432 adev->dsp_ba = pci_ioremap_bar(pci, 4);
433 if (!adev->dsp_ba) {
434 dev_err(bus->dev, "ioremap error\n");
435 ret = -ENXIO;
436 goto err_remap_bar4;
437 }
438
439 snd_hdac_bus_parse_capabilities(bus);
440 if (bus->mlcap)
441 snd_hdac_ext_bus_get_ml_capabilities(bus);
442
443 if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
444 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
445 dma_set_max_seg_size(dev, UINT_MAX);
446
447 ret = avs_hdac_bus_init_streams(bus);
448 if (ret < 0) {
449 dev_err(dev, "failed to init streams: %d\n", ret);
450 goto err_init_streams;
451 }
452
453 ret = avs_hdac_acquire_irq(adev);
454 if (ret < 0) {
455 dev_err(bus->dev, "failed to acquire irq: %d\n", ret);
456 goto err_acquire_irq;
457 }
458
459 pci_set_master(pci);
460 pci_set_drvdata(pci, bus);
461 device_disable_async_suspend(dev);
462
463 schedule_work(&adev->probe_work);
464
465 return 0;
466
467 err_acquire_irq:
468 snd_hdac_bus_free_stream_pages(bus);
469 snd_hdac_ext_stream_free_all(bus);
470 err_init_streams:
471 iounmap(adev->dsp_ba);
472 err_remap_bar4:
473 iounmap(bus->remap_addr);
474 err_remap_bar0:
475 pci_release_regions(pci);
476 return ret;
477 }
478
/*
 * avs_pci_remove - PCI remove handler; tears down in reverse probe order.
 *
 * Sequence matters: stop deferred probing and IPC first, unregister boards
 * and codecs, then release controller/DSP resources and finally rebalance
 * the runtime-PM state modified during probe.
 */
static void avs_pci_remove(struct pci_dev *pci)
{
	struct hdac_device *hdev, *save;
	struct hdac_bus *bus = pci_get_drvdata(pci);
	struct avs_dev *adev = hdac_to_avs(bus);

	/* Make sure the deferred bring-up is not still running. */
	cancel_work_sync(&adev->probe_work);
	avs_ipc_block(adev->ipc);

	avs_unregister_all_boards(adev);

	if (adev->nhlt)
		intel_nhlt_free(adev->nhlt);

	/* CLDMA exists only on platforms flagged with the attribute. */
	if (avs_platattr_test(adev, CLDMA))
		hda_cldma_free(&code_loader);

	snd_hdac_stop_streams_and_chip(bus);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	/* it is safe to remove all codecs from the system now */
	list_for_each_entry_safe(hdev, save, &bus->codec_list, list)
		snd_hda_codec_unregister(hdac_to_hda_codec(hdev));

	snd_hdac_bus_free_stream_pages(bus);
	snd_hdac_ext_stream_free_all(bus);
	/* reverse ml_capabilities */
	snd_hdac_link_free_all(bus);
	snd_hdac_ext_bus_exit(bus);

	avs_dsp_core_disable(adev, GENMASK(adev->hw_cfg.dsp_cores - 1, 0));
	snd_hdac_ext_bus_ppcap_enable(bus, false);

	/* snd_hdac_stop_streams_and_chip does that already? */
	snd_hdac_bus_stop_chip(bus);
	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
	if (bus->audio_component)
		snd_hdac_i915_exit(bus);

	avs_module_info_free(adev);
	/* Free IRQs in reverse request order: IPC (adev), then stream (bus). */
	pci_free_irq(pci, 0, adev);
	pci_free_irq(pci, 0, bus);
	pci_free_irq_vectors(pci);
	iounmap(bus->remap_addr);
	iounmap(adev->dsp_ba);
	pci_release_regions(pci);

	/* Firmware is not needed anymore */
	avs_release_firmwares(adev);

	/* pm_runtime_forbid() can rpm_resume() which we do not want */
	pm_runtime_disable(&pci->dev);
	pm_runtime_forbid(&pci->dev);
	pm_runtime_enable(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);
}
536
/*
 * avs_suspend_common - shared system/runtime suspend path.
 *
 * Powers down links, moves the DSP to D3 over IPC, disables cores and
 * processing pipe capabilities, then puts the controller into reset.
 * Returns 0 on success or a negative error code.
 */
static int __maybe_unused avs_suspend_common(struct avs_dev *adev)
{
	struct hdac_bus *bus = &adev->base.core;
	int ret;

	/* Deferred probing must have finished before suspending. */
	flush_work(&adev->probe_work);

	snd_hdac_ext_bus_link_power_down_all(bus);

	ret = avs_ipc_set_dx(adev, AVS_MAIN_CORE_MASK, false);
	/*
	 * pm_runtime is blocked on DSP failure but system-wide suspend is not.
	 * Do not block entire system from suspending if that's the case.
	 */
	if (ret && ret != -EPERM) {
		dev_err(adev->dev, "set dx failed: %d\n", ret);
		return AVS_IPC_RET(ret);
	}

	/* No IPC traffic past this point. */
	avs_ipc_block(adev->ipc);
	avs_dsp_op(adev, int_control, false);
	snd_hdac_ext_bus_ppcap_int_enable(bus, false);

	ret = avs_dsp_core_disable(adev, AVS_MAIN_CORE_MASK);
	if (ret < 0) {
		dev_err(adev->dev, "core_mask %ld disable failed: %d\n", AVS_MAIN_CORE_MASK, ret);
		return ret;
	}

	snd_hdac_ext_bus_ppcap_enable(bus, false);
	/* disable LP SRAM retention */
	avs_hda_power_gating_enable(adev, false);
	snd_hdac_bus_stop_chip(bus);
	/* disable CG when putting controller to reset */
	avs_hdac_clock_gating_enable(bus, false);
	snd_hdac_bus_enter_link_reset(bus);
	avs_hdac_clock_gating_enable(bus, true);

	snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);

	return 0;
}
579
avs_resume_common(struct avs_dev * adev,bool purge)580 static int __maybe_unused avs_resume_common(struct avs_dev *adev, bool purge)
581 {
582 struct hdac_bus *bus = &adev->base.core;
583 struct hdac_ext_link *hlink;
584 int ret;
585
586 snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
587 avs_hdac_bus_init_chip(bus, true);
588
589 snd_hdac_ext_bus_ppcap_enable(bus, true);
590 snd_hdac_ext_bus_ppcap_int_enable(bus, true);
591
592 ret = avs_dsp_boot_firmware(adev, purge);
593 if (ret < 0) {
594 dev_err(adev->dev, "firmware boot failed: %d\n", ret);
595 return ret;
596 }
597
598 /* turn off the links that were off before suspend */
599 list_for_each_entry(hlink, &bus->hlink_list, list) {
600 if (!hlink->ref_count)
601 snd_hdac_ext_bus_link_power_down(hlink);
602 }
603
604 /* check dma status and clean up CORB/RIRB buffers */
605 if (!bus->cmd_dma_state)
606 snd_hdac_bus_stop_cmd_io(bus);
607
608 return 0;
609 }
610
/* System suspend callback. */
static int __maybe_unused avs_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev));
}
615
/* System resume callback; purges and reloads DSP firmware. */
static int __maybe_unused avs_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true);
}
620
/* Runtime (autosuspend) suspend callback. */
static int __maybe_unused avs_runtime_suspend(struct device *dev)
{
	return avs_suspend_common(to_avs_dev(dev));
}
625
/* Runtime resume callback; purges and reloads DSP firmware. */
static int __maybe_unused avs_runtime_resume(struct device *dev)
{
	return avs_resume_common(to_avs_dev(dev), true);
}
630
/* PM operations: system sleep and runtime PM share the common paths above. */
static const struct dev_pm_ops avs_dev_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(avs_suspend, avs_resume)
	SET_RUNTIME_PM_OPS(avs_runtime_suspend, avs_runtime_resume, NULL)
};
635
/* Platform description for SkyLake-based parts (uses CLDMA code loader). */
static const struct avs_spec skl_desc = {
	.name = "skl",
	.min_fw_version = {	/* oldest firmware known to work */
		.major = 9,
		.minor = 21,
		.hotfix = 0,
		.build = 4732,
	},
	.dsp_ops = &skl_dsp_ops,
	.core_init_mask = 1,	/* boot main core only */
	.attributes = AVS_PLATATTR_CLDMA,
	.sram_base_offset = SKL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = SKL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = SKL_ADSP_SRAM_BASE_OFFSET,
};
651
/* Platform description for ApolloLake-based parts (IMR-capable). */
static const struct avs_spec apl_desc = {
	.name = "apl",
	.min_fw_version = {	/* oldest firmware known to work */
		.major = 9,
		.minor = 22,
		.hotfix = 1,
		.build = 4323,
	},
	.dsp_ops = &apl_dsp_ops,
	.core_init_mask = 3,	/* boot cores 0 and 1 */
	.attributes = AVS_PLATATTR_IMR,
	.sram_base_offset = APL_ADSP_SRAM_BASE_OFFSET,
	.sram_window_size = APL_ADSP_SRAM_WINDOW_SIZE,
	.rom_status = APL_ADSP_SRAM_BASE_OFFSET,
};
667
/* Supported PCI device IDs, each paired with its platform description. */
static const struct pci_device_id avs_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x9d70), (unsigned long)&skl_desc }, /* SKL */
	{ PCI_VDEVICE(INTEL, 0x9d71), (unsigned long)&skl_desc }, /* KBL */
	{ PCI_VDEVICE(INTEL, 0x5a98), (unsigned long)&apl_desc }, /* APL */
	{ PCI_VDEVICE(INTEL, 0x3198), (unsigned long)&apl_desc }, /* GML */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, avs_ids);
676
/* PCI driver glue; module init/exit generated by module_pci_driver(). */
static struct pci_driver avs_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = avs_ids,
	.probe = avs_pci_probe,
	.remove = avs_pci_remove,
	.driver = {
		.pm = &avs_dev_pm,
	},
};
module_pci_driver(avs_pci_driver);
687
/* Module metadata. */
MODULE_AUTHOR("Cezary Rojewski <cezary.rojewski@intel.com>");
MODULE_AUTHOR("Amadeusz Slawinski <amadeuszx.slawinski@linux.intel.com>");
MODULE_DESCRIPTION("Intel cAVS sound driver");
MODULE_LICENSE("GPL");
692