// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//         Keyon Jie <yang.jie@linux.intel.com>

#include <sof/audio/buffer.h>
#include <sof/audio/component_ext.h>
#include <sof/audio/pipeline.h>
#include <sof/drivers/interrupt.h>
#include <sof/lib/agent.h>
#include <sof/lib/alloc.h>
#include <sof/lib/uuid.h>
#include <sof/list.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/schedule.h>
#include <sof/schedule/task.h>
#include <sof/spinlock.h>
#include <sof/string.h>
#include <ipc/header.h>
#include <ipc/stream.h>
#include <ipc/topology.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* f11818eb-e92e-4082-82a3-dc54c604ebb3 */
DECLARE_SOF_UUID("pipe-task", pipe_task_uuid, 0xf11818eb, 0xe92e, 0x4082,
		 0x82,  0xa3, 0xdc, 0x54, 0xc6, 0x04, 0xeb, 0xb3);

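/*
 * Cancel the pipeline's scheduling task. For a DMA driven pipeline this also
 * re-arms the system agent panic-on-delay check that was disabled when the
 * pipeline was scheduled for copy (see pipeline_schedule_copy()).
 */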
static void pipeline_schedule_cancel(struct pipeline *p)
{
	schedule_task_cancel(p->pipe_task);

	/* re-enable system agent panic when there are no longer any
	 * DMA driven pipelines
	 */
	if (!pipeline_is_timer_driven(p))
		sa_set_panic_on_delay(true);
}

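/*
 * Low latency task body shared by all pipelines: recover from a pending
 * xrun if needed, perform one pipeline copy and ask to be rescheduled.
 * Returning SOF_TASK_STATE_COMPLETED stops the task, after which the host
 * is expected to stop the pipeline.
 */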
static enum task_state pipeline_task(void *arg)
{
	struct pipeline *p = arg;
	int err;

	pipe_dbg(p, "pipeline_task()");

	/* are we in xrun? */
	if (p->xrun_bytes) {
		/* try to recover */
		err = pipeline_xrun_recover(p);
		if (err < 0)
			/* skip copy if still in xrun */
			return SOF_TASK_STATE_COMPLETED;
	}

	err = pipeline_copy(p);
	if (err < 0) {
		/* try to recover */
		err = pipeline_xrun_recover(p);
		if (err < 0) {
			pipe_err(p, "pipeline_task(): xrun recover failed! pipeline will be stopped!");
			/* failed - host will stop this pipeline */
			return SOF_TASK_STATE_COMPLETED;
		}
	}

	pipe_dbg(p, "pipeline_task() sched");

	return SOF_TASK_STATE_RESCHEDULE;
}

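/*
 * Allocate and initialise the low latency task wrapping this pipeline. The
 * task is marked registrable only when the scheduling component belongs to
 * this pipeline.
 */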
static struct task *pipeline_task_init(struct pipeline *p, uint32_t type)
{
	struct pipeline_task *task = NULL;

	task = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM,
		       sizeof(*task));
	if (!task)
		return NULL;

	if (schedule_task_init_ll(&task->task, SOF_UUID(pipe_task_uuid), type,
				  p->priority, pipeline_task,
				  p, p->core, 0) < 0) {
		rfree(task);
		return NULL;
	}

	task->sched_comp = p->sched_comp;
	task->registrable = p == p->sched_comp->pipeline;

	return &task->task;
}

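/*
 * Store the scheduling parameters supplied by the host/topology for later
 * use when the pipeline task is created and scheduled. Illustrative call
 * only, values depend on the topology:
 *
 *	pipeline_schedule_config(p, sched_id, 0, 1000, 100000, 48,
 *				 SOF_TIME_DOMAIN_TIMER);
 */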
int pipeline_schedule_config(struct pipeline *p, uint32_t sched_id,
			     uint32_t core, uint32_t period,
			     uint32_t period_mips, uint32_t frames_per_sched,
			     uint32_t time_domain)
{
	p->sched_id = sched_id;
	p->core = core;
	p->period = period;
	p->period_mips = period_mips;
	p->frames_per_sched = frames_per_sched;
	p->time_domain = time_domain;
	return 0;
}

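/*
 * (Re)schedule or cancel the tasks of all pipelines collected in the walk
 * context during a trigger, according to the trigger command. The trigger
 * path appends each affected pipeline to ctx->pipelines via
 * pipeline_comp_trigger_sched_comp() before calling this.
 */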
void pipeline_schedule_triggered(struct pipeline_walk_context *ctx,
				 int cmd)
{
	struct list_item *tlist;
	struct pipeline *p;
	uint32_t flags;

	/*
	 * Interrupts have to be disabled while adding tasks to or removing
	 * them from the scheduler list. Without that, scheduling could begin
	 * before all pipelines have reached a consistent state.
	 */
	irq_local_disable(flags);

	list_for_item(tlist, &ctx->pipelines) {
		p = container_of(tlist, struct pipeline, list);

		switch (cmd) {
		case COMP_TRIGGER_PAUSE:
		case COMP_TRIGGER_STOP:
			pipeline_schedule_cancel(p);
			p->status = COMP_STATE_PAUSED;
			break;
		case COMP_TRIGGER_RELEASE:
		case COMP_TRIGGER_START:
			pipeline_schedule_copy(p, 0);
			p->xrun_bytes = 0;
			p->status = COMP_STATE_ACTIVE;
			break;
		case COMP_TRIGGER_SUSPEND:
		case COMP_TRIGGER_RESUME:
		default:
			break;
		}
	}

	irq_local_enable(flags);
}

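/*
 * Create the pipeline task on first use, selecting timer or DMA driven low
 * latency scheduling based on the pipeline's time domain.
 */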
int pipeline_comp_task_init(struct pipeline *p)
{
	uint32_t type;

	/* initialize task if necessary */
	if (!p->pipe_task) {
		/* right now we always consider the pipeline a low latency
		 * component, but this may change in the future
		 */
		type = pipeline_is_timer_driven(p) ? SOF_SCHEDULE_LL_TIMER :
			SOF_SCHEDULE_LL_DMA;

		p->pipe_task = pipeline_task_init(p, type);
		if (!p->pipe_task) {
			pipe_err(p, "pipeline_comp_task_init(): task init failed");
			return -ENOMEM;
		}
	}

	return 0;
}

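/*
 * Queue the pipeline for scheduling once the trigger completes. Only the
 * scheduling component may do this, or the sink component when the
 * scheduling component belongs to a different pipeline.
 */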
void pipeline_comp_trigger_sched_comp(struct pipeline *p,
				      struct comp_dev *comp,
				      struct pipeline_walk_context *ctx)
{
	/* only required for the scheduling component, or for the sink
	 * component on a pipeline that has no scheduling component of its own
	 */
	if (dev_comp_id(p->sched_comp) != dev_comp_id(comp) &&
	    (pipeline_id(p) == pipeline_id(p->sched_comp->pipeline) ||
	     dev_comp_id(p->sink_comp) != dev_comp_id(comp)))
		return;

	/* add for later schedule */
	list_item_append(&p->list, &ctx->pipelines);
}

/* notify pipeline that this component requires buffers emptied/filled */
void pipeline_schedule_copy(struct pipeline *p, uint64_t start)
{
	/* disable system agent panic for DMA driven pipelines */
	if (!pipeline_is_timer_driven(p))
		sa_set_panic_on_delay(false);

	schedule_task(p->pipe_task, start, p->period);
}