1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2023 Advanced Micro Devices, Inc */
3
4 #include <linux/dynamic_debug.h>
5
6 #include "core.h"
7
/* Per-command wait state for a sleeping AdminQ caller: identifies the
 * qcq the command was posted on, and carries the completion that
 * pdsc_process_adminq() signals once the device's completion descriptor
 * has been copied out to the caller's buffer.
 */
struct pdsc_wait_context {
	struct pdsc_qcq *qcq;
	struct completion wait_completion;
};
12
pdsc_process_notifyq(struct pdsc_qcq * qcq)13 static int pdsc_process_notifyq(struct pdsc_qcq *qcq)
14 {
15 union pds_core_notifyq_comp *comp;
16 struct pdsc *pdsc = qcq->pdsc;
17 struct pdsc_cq *cq = &qcq->cq;
18 struct pdsc_cq_info *cq_info;
19 int nq_work = 0;
20 u64 eid;
21
22 cq_info = &cq->info[cq->tail_idx];
23 comp = cq_info->comp;
24 eid = le64_to_cpu(comp->event.eid);
25 while (eid > pdsc->last_eid) {
26 u16 ecode = le16_to_cpu(comp->event.ecode);
27
28 switch (ecode) {
29 case PDS_EVENT_LINK_CHANGE:
30 dev_info(pdsc->dev, "NotifyQ LINK_CHANGE ecode %d eid %lld\n",
31 ecode, eid);
32 pdsc_notify(PDS_EVENT_LINK_CHANGE, comp);
33 break;
34
35 case PDS_EVENT_RESET:
36 dev_info(pdsc->dev, "NotifyQ RESET ecode %d eid %lld\n",
37 ecode, eid);
38 pdsc_notify(PDS_EVENT_RESET, comp);
39 break;
40
41 case PDS_EVENT_XCVR:
42 dev_info(pdsc->dev, "NotifyQ XCVR ecode %d eid %lld\n",
43 ecode, eid);
44 break;
45
46 default:
47 dev_info(pdsc->dev, "NotifyQ ecode %d eid %lld\n",
48 ecode, eid);
49 break;
50 }
51
52 pdsc->last_eid = eid;
53 cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
54 cq_info = &cq->info[cq->tail_idx];
55 comp = cq_info->comp;
56 eid = le64_to_cpu(comp->event.eid);
57
58 nq_work++;
59 }
60
61 qcq->accum_work += nq_work;
62
63 return nq_work;
64 }
65
/* Service the AdminQ: first drain any pending NotifyQ events, then walk
 * the AdminQ completion ring, copying each completion out to the poster's
 * buffer and waking the waiter, and finally return the consumed interrupt
 * credits to the device with re-arm.
 */
void pdsc_process_adminq(struct pdsc_qcq *qcq)
{
	union pds_core_adminq_comp *comp;
	struct pdsc_queue *q = &qcq->q;
	struct pdsc *pdsc = qcq->pdsc;
	struct pdsc_cq *cq = &qcq->cq;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	int nq_work = 0;
	int aq_work = 0;
	int credits;

	/* Don't process AdminQ when shutting down */
	if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
		dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
			__func__);
		return;
	}

	/* Check for NotifyQ event */
	nq_work = pdsc_process_notifyq(&pdsc->notifyqcq);

	/* Check for empty queue, which can happen if the interrupt was
	 * for a NotifyQ event and there are no new AdminQ completions.
	 */
	if (q->tail_idx == q->head_idx)
		goto credits;

	/* Find the first completion to clean,
	 * run the callback in the related q_info,
	 * and continue while we still match done color
	 */
	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);
	comp = cq->info[cq->tail_idx].comp;
	while (pdsc_color_match(comp->color, cq->done_color)) {
		q_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

		/* Copy out the completion data */
		memcpy(q_info->dest, comp, sizeof(*comp));

		/* Wake the poster sleeping in pdsc_adminq_post() */
		complete_all(&q_info->wc->wait_completion);

		/* The expected color flips each time the ring wraps, so
		 * toggle it when we consume the last descriptor.
		 */
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		comp = cq->info[cq->tail_idx].comp;

		aq_work++;
	}
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);

	qcq->accum_work += aq_work;

credits:
	/* Return the interrupt credits, one for each completion */
	credits = nq_work + aq_work;
	if (credits)
		pds_core_intr_credits(&pdsc->intr_ctrl[qcq->intx],
				      credits,
				      PDS_CORE_INTR_CRED_REARM);
}
128
pdsc_work_thread(struct work_struct * work)129 void pdsc_work_thread(struct work_struct *work)
130 {
131 struct pdsc_qcq *qcq = container_of(work, struct pdsc_qcq, work);
132
133 pdsc_process_adminq(qcq);
134 }
135
pdsc_adminq_isr(int irq,void * data)136 irqreturn_t pdsc_adminq_isr(int irq, void *data)
137 {
138 struct pdsc_qcq *qcq = data;
139 struct pdsc *pdsc = qcq->pdsc;
140
141 /* Don't process AdminQ when shutting down */
142 if (pdsc->state & BIT_ULL(PDSC_S_STOPPING_DRIVER)) {
143 dev_err(pdsc->dev, "%s: called while PDSC_S_STOPPING_DRIVER\n",
144 __func__);
145 return IRQ_HANDLED;
146 }
147
148 queue_work(pdsc->wq, &qcq->work);
149 pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
150
151 return IRQ_HANDLED;
152 }
153
/* Post one command onto the AdminQ under the adminq_lock.
 *
 * Records @wc and @comp in the descriptor's q_info so that
 * pdsc_process_adminq() can copy the completion out and wake the caller,
 * copies the command into the descriptor, and rings the doorbell.
 *
 * Return: the descriptor index used (>= 0) on success,
 *	   -ENOSPC if the ring is full, -ENXIO if FW is not running.
 */
static int __pdsc_adminq_post(struct pdsc *pdsc,
			      struct pdsc_qcq *qcq,
			      union pds_core_adminq_cmd *cmd,
			      union pds_core_adminq_comp *comp,
			      struct pdsc_wait_context *wc)
{
	struct pdsc_queue *q = &qcq->q;
	struct pdsc_q_info *q_info;
	unsigned long irqflags;
	unsigned int avail;
	int index;
	int ret;

	spin_lock_irqsave(&pdsc->adminq_lock, irqflags);

	/* Check for space in the queue -- one slot is always kept
	 * unused so that head == tail unambiguously means "empty".
	 */
	avail = q->tail_idx;
	if (q->head_idx >= avail)
		avail += q->num_descs - q->head_idx - 1;
	else
		avail -= q->head_idx + 1;
	if (!avail) {
		ret = -ENOSPC;
		goto err_out_unlock;
	}

	/* Check that the FW is running */
	if (!pdsc_is_fw_running(pdsc)) {
		u8 fw_status = ioread8(&pdsc->info_regs->fw_status);

		dev_info(pdsc->dev, "%s: post failed - fw not running %#02x:\n",
			 __func__, fw_status);
		ret = -ENXIO;

		goto err_out_unlock;
	}

	/* Post the request: stash the wait context and the caller's
	 * completion buffer where pdsc_process_adminq() will find them.
	 */
	index = q->head_idx;
	q_info = &q->info[index];
	q_info->wc = wc;
	q_info->dest = comp;
	memcpy(q_info->desc, cmd, sizeof(*cmd));

	dev_dbg(pdsc->dev, "head_idx %d tail_idx %d\n",
		q->head_idx, q->tail_idx);
	dev_dbg(pdsc->dev, "post admin queue command:\n");
	dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
			 cmd, sizeof(*cmd), true);

	/* Advance head and tell the device about the new descriptor */
	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);

	pds_core_dbell_ring(pdsc->kern_dbpage,
			    q->hw_type, q->dbval | q->head_idx);
	ret = index;

err_out_unlock:
	spin_unlock_irqrestore(&pdsc->adminq_lock, irqflags);
	return ret;
}
214
/* Post an AdminQ command and wait for its completion.
 *
 * The wait is time-sliced so that FW death is noticed early; when
 * @fast_poll is false the slice grows exponentially up to
 * PDSC_ADMINQ_MAX_POLL_INTERVAL to avoid aggressive polling.  On -ENXIO
 * or -ETIMEDOUT the health worker is scheduled to check on the device.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * NOTE(review): the wait context (and its completion) lives on this
 * stack frame, but pdsc_process_adminq() dereferences q_info->wc when a
 * completion arrives.  If the device completes the command *after* this
 * function has returned on the -ENXIO/-ETIMEDOUT paths, that is a
 * use-after-free of stack memory -- confirm whether the q_info/wc
 * lifetime needs to be decoupled from the caller's stack.
 */
int pdsc_adminq_post(struct pdsc *pdsc,
		     union pds_core_adminq_cmd *cmd,
		     union pds_core_adminq_comp *comp,
		     bool fast_poll)
{
	struct pdsc_wait_context wc = {
		.wait_completion =
			COMPLETION_INITIALIZER_ONSTACK(wc.wait_completion),
	};
	unsigned long poll_interval = 1;
	unsigned long poll_jiffies;
	unsigned long time_limit;
	unsigned long time_start;
	unsigned long time_done;
	unsigned long remaining;
	int err = 0;
	int index;

	wc.qcq = &pdsc->adminqcq;
	index = __pdsc_adminq_post(pdsc, &pdsc->adminqcq, cmd, comp, &wc);
	if (index < 0) {
		err = index;
		goto err_out;
	}

	time_start = jiffies;
	time_limit = time_start + HZ * pdsc->devcmd_timeout;
	do {
		/* Timeslice the actual wait to catch IO errors etc early */
		poll_jiffies = msecs_to_jiffies(poll_interval);
		remaining = wait_for_completion_timeout(&wc.wait_completion,
							poll_jiffies);
		if (remaining)
			break;

		/* Bail out early if the FW has stopped */
		if (!pdsc_is_fw_running(pdsc)) {
			u8 fw_status = ioread8(&pdsc->info_regs->fw_status);

			dev_dbg(pdsc->dev, "%s: post wait failed - fw not running %#02x:\n",
				__func__, fw_status);
			err = -ENXIO;
			break;
		}

		/* When fast_poll is not requested, prevent aggressive polling
		 * on failures due to timeouts by doing exponential back off.
		 */
		if (!fast_poll && poll_interval < PDSC_ADMINQ_MAX_POLL_INTERVAL)
			poll_interval <<= 1;
	} while (time_before(jiffies, time_limit));
	time_done = jiffies;
	dev_dbg(pdsc->dev, "%s: elapsed %d msecs\n",
		__func__, jiffies_to_msecs(time_done - time_start));

	/* Check the results */
	if (time_after_eq(time_done, time_limit))
		err = -ETIMEDOUT;

	dev_dbg(pdsc->dev, "read admin queue completion idx %d:\n", index);
	dynamic_hex_dump("comp ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	/* Only trust comp->status if we actually got a completion */
	if (remaining && comp->status)
		err = pdsc_err_to_errno(comp->status);

err_out:
	if (err) {
		dev_dbg(pdsc->dev, "%s: opcode %d status %d err %pe\n",
			__func__, cmd->opcode, comp->status, ERR_PTR(err));
		/* Device looks sick -- let the health worker investigate */
		if (err == -ENXIO || err == -ETIMEDOUT)
			queue_work(pdsc->wq, &pdsc->health_work);
	}

	return err;
}
EXPORT_SYMBOL_GPL(pdsc_adminq_post);
290 EXPORT_SYMBOL_GPL(pdsc_adminq_post);
291