// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2023 Intel Corporation
 */
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_CONF_SET,
	IWL_DBG_TLV_TYPE_NUM,
};

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 3,},
	[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};

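/* Copy a TLV (header + payload) into a freshly allocated list node and
 * queue it on @list; the nodes are freed in iwl_dbg_tlv_free().
 */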
static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
			   struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv));
	memcpy(node->tlv.data, tlv->data, len);
	list_add_tail(&node->list, list);

	return 0;
}

static bool iwl_dbg_tlv_ver_support(const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_debug_info_tlv *debug_info = (const void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}

static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_allocation_tlv *alloc = (const void *)tlv->data;
	u32 buf_location;
	u32 alloc_id;

	if (le32_to_cpu(tlv->length) != sizeof(*alloc))
		return -EINVAL;

	buf_location = le32_to_cpu(alloc->buf_location);
	alloc_id = le32_to_cpu(alloc->alloc_id);

	if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
	    buf_location >= IWL_FW_INI_LOCATION_NUM)
		goto err;

	if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
	    alloc->req_size == 0) {
		IWL_ERR(trans, "WRT: Invalid DRAM buffer allocation requested size (0)\n");
		return -EINVAL;
	}

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
err:
	IWL_ERR(trans,
		"WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
		alloc_id, buf_location);
	return -EINVAL;
}

static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_hcmd_tlv *hcmd = (const void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands cannot be sent at the early time point since the
	 * FW is not ready
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}

static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_region_tlv *reg = (const void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
	u8 type = reg->type;
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	/*
	 * From version 2 on, the upper bits of the ID carry the debug
	 * policy; only the 16 LSBs are the region ID, so mask the rest out.
	 */
	if (le32_to_cpu(reg->hdr.version) >= 2)
		id &= IWL_FW_INI_REGION_ID_MASK;

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	/* for safe use of a string from FW, limit it to IWL_FW_INI_MAX_NAME */
	IWL_DEBUG_FW(trans, "WRT: parsing region: %.*s\n",
		     IWL_FW_INI_MAX_NAME, reg->name);

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
	    !trans->ops->read_config32) {
		IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
		return -EOPNOTSUPP;
	}

	if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
		trans->dbg.imr_data.sram_addr =
			le32_to_cpu(reg->internal_buffer.base_addr);
		trans->dbg.imr_data.sram_size =
			le32_to_cpu(reg->internal_buffer.size);
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_trigger_tlv *trig = (const void *)tlv->data;
	struct iwl_fw_ini_trigger_tlv *dup_trig;
	u32 tp = le32_to_cpu(trig->time_point);
	u32 rf = le32_to_cpu(trig->reset_fw);
	struct iwl_ucode_tlv *dup = NULL;
	int ret;

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	IWL_DEBUG_FW(trans,
		     "WRT: time point %u for trigger TLV with reset_fw %u\n",
		     tp, rf);
	trans->dbg.last_tp_resetfw = 0xFF;
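	/* An occurrences count of 0 means "unlimited": duplicate the TLV
	 * and patch the copy to (u32)-1 so the countdown in the dump flow
	 * never reaches zero.
	 */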
	if (!le32_to_cpu(trig->occurrences)) {
		dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
			      GFP_KERNEL);
		if (!dup)
			return -ENOMEM;
		dup_trig = (void *)dup->data;
		dup_trig->occurrences = cpu_to_le32(-1);
		tlv = dup;
	}

	ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
	kfree(dup);

	return ret;
}

static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
				  const struct iwl_ucode_tlv *tlv)
{
	const struct iwl_fw_ini_conf_set_tlv *conf_set = (const void *)tlv->data;
	u32 tp = le32_to_cpu(conf_set->time_point);
	u32 type = le32_to_cpu(conf_set->set_type);

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_DEBUG_FW(trans,
			     "WRT: Invalid time point %u for config set TLV\n", tp);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID ||
	    type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) {
		IWL_DEBUG_FW(trans,
			     "WRT: Invalid config set type %u for config set TLV\n", type);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
}

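/* Allocator dispatch table, indexed by enum iwl_dbg_tlv_type */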
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      const struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC] = iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
	[IWL_DBG_TLV_TYPE_CONF_SET] = iwl_dbg_tlv_config_set,
};

void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	const struct iwl_fw_ini_header *hdr = (const void *)&tlv->data[0];
	u32 type;
	u32 tlv_idx;
	u32 domain;
	int ret;

	if (le32_to_cpu(tlv->length) < sizeof(*hdr))
		return;

	type = le32_to_cpu(tlv->type);
	tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	domain = le32_to_cpu(hdr->domain);

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_WARN(trans,
			 "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			 type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		timer_shutdown_sync(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->config_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	const struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

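	/* Walk the raw TLV stream: each struct iwl_ucode_tlv header is
	 * followed by a payload padded to a 4-byte boundary, so advance
	 * by the aligned length after handing each TLV to
	 * iwl_dbg_tlv_alloc().
	 */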
	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (const void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}

void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	const char *yoyo_bin = "iwl-debug-yoyo.bin";
	int res;

	if (!iwlwifi_mod_params.enable_ini ||
	    trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000)
		return;

	res = firmware_request_nowarn(&fw, yoyo_bin, dev);
	IWL_DEBUG_FW(trans, "%s %s\n", res ? "didn't load" : "loaded", yoyo_bin);

	if (res)
		return;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
		INIT_LIST_HEAD(&tp->config_list);
	}
}

static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something. Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}

static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags) {
		for (i = 0; i < fw_mon->num_frags; i++)
			memset(fw_mon->frags[i].block, 0,
			       fw_mon->frags[i].size);
		return 0;
	}

	if (fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

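	/* Older families are more constrained: before AX210 only a single
	 * DBGC1 fragment is supported, and before BZ only the DBGC1-DBGC3
	 * allocation ids exist.
	 */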
	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	} else if (fwrt->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ &&
		   alloc_id > IWL_FW_INI_ALLOCATION_ID_DBGC3) {
		return -EIO;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}

static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

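	/* Each BUFFER_ALLOCATION command carries at most
	 * BUF_ALLOC_MAX_NUM_FRAGS fragments, so send the remaining
	 * fragments in chunks of that size.
	 */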
	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
			.flags = CMD_SEND_IN_RFKILL,
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}
		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}

static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
		return;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
				   enum iwl_fw_ini_allocation_id alloc_id,
				   struct iwl_dram_info *dram_info)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_frags;
	int j, fw_mon_idx = 0;
	struct iwl_buf_alloc_cmd *data;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH) {
		IWL_DEBUG_FW(fwrt, "WRT: alloc_id %u location is not in DRAM_PATH\n",
			     alloc_id);
		return -1;
	}

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return -1;

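	/* dram_frags[] has no slot for IWL_FW_INI_ALLOCATION_INVALID (0),
	 * so allocation id N lands in slot N - 1 (DBGC1 in slot 0).
	 */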
	num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	data = &dram_info->dram_frags[alloc_id - 1];
	data->alloc_id = cpu_to_le32(alloc_id);
	data->num_frags = cpu_to_le32(num_frags);
	data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH);

	IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n",
		     alloc_id, num_frags);

	for (j = 0; j < num_frags; j++) {
		struct iwl_buf_alloc_frag *frag = &data->frags[j];
		struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++];

		frag->addr = cpu_to_le64(fw_mon_frag->physical);
		frag->size = cpu_to_le32(fw_mon_frag->size);
		IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n");
		IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n",
			     j, (unsigned long long)fw_mon_frag->physical,
			     fw_mon_frag->size);
	}
	return 0;
}

static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
{
	int ret, i;
	bool dram_alloc = false;
	struct iwl_dram_data *frags =
		&fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
	struct iwl_dram_info *dram_info;

	if (!frags || !frags->block)
		return;

	dram_info = frags->block;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
		return;

	memset(dram_info, 0, sizeof(*dram_info));

	for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	     i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		if (fwrt->trans->dbg.fw_mon_cfg[i].buf_location ==
		    IWL_FW_INI_LOCATION_INVALID)
			continue;

		ret = iwl_dbg_tlv_update_dram(fwrt, i, dram_info);
		if (!ret)
			dram_alloc = true;
		else
			IWL_INFO(fwrt,
				 "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
				 i, ret);
	}

	if (dram_alloc) {
		dram_info->first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
		dram_info->second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
	}
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
				     struct list_head *conf_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, conf_list, list) {
		struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
		u32 count, address, value;
		u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
		u32 type = le32_to_cpu(config_list->set_type);
		u32 offset = le32_to_cpu(config_list->addr_offset);

		switch (type) {
		case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: {
			if (!iwl_trans_grab_nic_access(fwrt->trans)) {
				IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n");
				IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n");
				continue;
			}
			IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config len: %u\n", len);
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_trans_write_prph(fwrt->trans, address + offset, value);
			}
			iwl_trans_release_nic_access(fwrt->trans);
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: {
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_trans_write_mem32(fwrt->trans, address + offset, value);
				IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, addr: %u val: %u\n",
					     count, address, value);
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_CSR: {
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				value = le32_to_cpu(config_list->addr_val[count].value);
				iwl_write32(fwrt->trans, address + offset, value);
				IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, addr: %u val: %u\n",
					     count, address, value);
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
			struct iwl_dbgc1_info dram_info = {};
			struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
			__le64 dram_base_addr;
			__le32 dram_size;
			u64 dram_addr;
			int ret;

			if (!frags)
				break;

			dram_base_addr = cpu_to_le64(frags->physical);
			dram_size = cpu_to_le32(frags->size);
			dram_addr = le64_to_cpu(dram_base_addr);

			IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
				     dram_addr, le32_to_cpu(dram_size));
			IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
				     le32_to_cpu(config_list->addr_offset));
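			/* The first 0x400 bytes of the DBGC1 fragment are
			 * skipped below; presumably they hold the
			 * struct iwl_dram_info header that
			 * iwl_dbg_tlv_update_drams() writes at the start
			 * of this same buffer.
			 */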
			for (count = 0; count < len; count++) {
				address = le32_to_cpu(config_list->addr_val[count].address);
				dram_info.dbgc1_add_lsb =
					cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400);
				dram_info.dbgc1_add_msb =
					cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32);
				dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400);
				ret = iwl_trans_write_mem(fwrt->trans,
							  address + offset, &dram_info, 4);
				if (ret) {
					IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n");
					break;
				}
			}
			break;
		}
		case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: {
			u32 debug_token_config =
				le32_to_cpu(config_list->addr_val[0].value);

			IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n",
				     debug_token_config);
			fwrt->trans->dbg.ucode_preset = debug_token_config;
			break;
		}
		default:
			break;
		}
	}
}

static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data, false);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: No data was given for periodic trigger\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Override min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

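/* Return true if every data dword of the new trigger already appears in
 * the old trigger's data, i.e. the new data is an (order-insensitive)
 * subset of the old.
 */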
static bool is_trig_data_contained(const struct iwl_ucode_tlv *new,
				   const struct iwl_ucode_tlv *old)
{
	const struct iwl_fw_ini_trigger_tlv *new_trig = (const void *)new->data;
	const struct iwl_fw_ini_trigger_tlv *old_trig = (const void *)old->data;
	const __le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}

static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

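	/* If the resulting TLV size differs from the node's current size,
	 * resize the list node with krealloc(); that may move the node, so
	 * unlink it first and splice the reallocated node back into the
	 * same list position.
	 */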
	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	memcpy(node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}

static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		return iwl_dbg_tlv_add(trig_tlv, trig_list);
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

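/* @trig_data packs a wanted struct iwl_cmd_header (cmd and group_id);
 * match it against the received packet and, on a hit, stash a copy of
 * the packet for the dump.
 */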
static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
		    pkt->hdr.group_id == wanted_hdr->group_id)) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}

static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;
		u32 tp = le32_to_cpu(dump_data.trig->time_point);

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data, sync);
				if (ret)
					return ret;

				break;
			}
		}

		fwrt->trans->dbg.restart_required = false;
		IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
			     tp, le32_to_cpu(dump_data.trig->reset_fw));
		IWL_DEBUG_FW(fwrt,
			     "WRT: restart_required %d, last_tp_resetfw %d\n",
			     fwrt->trans->dbg.restart_required,
			     fwrt->trans->dbg.last_tp_resetfw);

		if (fwrt->trans->trans_cfg->device_family ==
		    IWL_DEVICE_FAMILY_9000) {
			fwrt->trans->dbg.restart_required = true;
		} else if (tp == IWL_FW_INI_TIME_POINT_FW_ASSERT &&
			   fwrt->trans->dbg.last_tp_resetfw ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
			fwrt->trans->dbg.restart_required = false;
			fwrt->trans->dbg.last_tp_resetfw = 0xFF;
			IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
			IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
			fwrt->trans->dbg.restart_required = true;
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
			IWL_DEBUG_FW(fwrt,
				     "WRT: stop only and no reload firmware\n");
			fwrt->trans->dbg.restart_required = false;
			fwrt->trans->dbg.last_tp_resetfw =
				le32_to_cpu(dump_data.trig->reset_fw);
		} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
			   IWL_FW_INI_RESET_FW_MODE_NOTHING) {
			IWL_DEBUG_FW(fwrt,
				     "WRT: nothing needs to be done after debug collection\n");
		} else {
			IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
				le32_to_cpu(dump_data.trig->reset_fw));
		}
	}
	return 0;
}

static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
	int ret, i;
	u32 failed_alloc = 0;

	if (*ini_dest == IWL_FW_INI_LOCATION_INVALID) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Generating active triggers list, domain 0x%x\n",
			     fwrt->trans->dbg.domains_bitmap);

		for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
			struct iwl_dbg_tlv_time_point_data *tp =
				&fwrt->trans->dbg.time_point[i];

			iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
		}
	} else if (*ini_dest != IWL_FW_INI_LOCATION_DRAM_PATH) {
		/* For DRAM, go through the loop below to clear all the buffers
		 * properly on restart, otherwise garbage may be left there and
		 * leak into new debug dumps.
		 */
		return;
	}

	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
			&fwrt->trans->dbg.fw_mon_cfg[i];
		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);

		if (dest == IWL_FW_INI_LOCATION_INVALID) {
			failed_alloc |= BIT(i);
			continue;
		}

		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
			*ini_dest = dest;

		if (dest != *ini_dest)
			continue;

		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);

		if (ret) {
			IWL_WARN(fwrt,
				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
			failed_alloc |= BIT(i);
		}
	}

	if (!failed_alloc)
		return;

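	/* Disable every active region that points at an allocation which
	 * failed or was never configured, so the dump flow does not try to
	 * collect from a buffer that does not exist.
	 */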
	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions) && failed_alloc; i++) {
		struct iwl_fw_ini_region_tlv *reg;
		struct iwl_ucode_tlv **active_reg =
			&fwrt->trans->dbg.active_regions[i];
		u32 reg_type;

		if (!*active_reg) {
			fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
			continue;
		}

		reg = (void *)(*active_reg)->data;
		reg_type = reg->type;

		if (reg_type != IWL_FW_INI_REGION_DRAM_BUFFER ||
		    !(BIT(le32_to_cpu(reg->dram_alloc_id)) & failed_alloc))
			continue;

		IWL_DEBUG_FW(fwrt,
			     "WRT: removing allocation id %d from region id %d\n",
			     le32_to_cpu(reg->dram_alloc_id), i);

		failed_alloc &= ~BIT(le32_to_cpu(reg->dram_alloc_id));
		fwrt->trans->dbg.unsupported_region_msk |= BIT(i);

		kfree(*active_reg);
		*active_reg = NULL;
	}
}

void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			     enum iwl_fw_ini_time_point tp_id,
			     union iwl_dbg_tlv_tp_data *tp_data,
			     bool sync)
{
	struct list_head *hcmd_list, *trig_list, *conf_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
	conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_update_drams(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
	case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_apply_config(fwrt, conf_list);
		iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(_iwl_dbg_tlv_time_point);