Lines Matching +full:reg +full:- +full:addr
1 /* SPDX-License-Identifier: GPL-2.0-only */
5 * Copyright (c) 2018-2020, Silicon Laboratories, Inc.
47 * #define list_for_print_symbolic list_names { -1, NULL }
99 #define hif_msg_list hif_msg_list_enum { -1, NULL }
157 #define hif_mib_list hif_mib_list_enum { -1, NULL }
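/* Editor's note: an invented example of the list pattern used above. The
 * *_list_enum macros are assumed to expand to comma-terminated { value, "name" }
 * pairs, so appending { -1, NULL } (as the comment at line 47 describes) closes
 * the list and lets it be handed directly to __print_symbolic() in TP_printk().
 * All names and values below are hypothetical.
 */
#define example_msg_list_enum		\
	{ 0x01, "FOO" },		\
	{ 0x02, "BAR" },
#define example_msg_list example_msg_list_enum { -1, NULL }
/* later used as: __print_symbolic(__entry->msg_id, example_msg_list) */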
175 __entry->tx_fill_level = tx_fill_level;
176 __entry->msg_len = le16_to_cpu(hif->len);
177 __entry->msg_id = hif->id;
178 __entry->if_id = hif->interface;
180 __entry->msg_type = __entry->msg_id & 0x80 ? "IND" : "CNF";
182 __entry->msg_type = "REQ";
184 (__entry->msg_id == HIF_REQ_ID_READ_MIB ||
185 __entry->msg_id == HIF_REQ_ID_WRITE_MIB)) {
186 __entry->mib = le16_to_cpup((__le16 *)hif->body);
189 __entry->mib = -1;
192 __entry->buf_len = min_t(int, __entry->msg_len, sizeof(__entry->buf))
193 - sizeof(struct wfx_hif_msg) - header_len;
194 memcpy(__entry->buf, hif->body + header_len, __entry->buf_len);
197 __entry->tx_fill_level,
198 __entry->if_id,
199 __entry->msg_type,
200 __print_symbolic(__entry->msg_id, hif_msg_list),
201 __entry->mib != -1 ? "/" : "",
202 __entry->mib != -1 ? __print_symbolic(__entry->mib, hif_mib_list) : "",
203 __print_hex(__entry->buf, __entry->buf_len),
204 __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
205 __entry->msg_len
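/* Editor's note: the assignments and format arguments above are fragments of a
 * single event definition. A minimal, illustrative skeleton of how such a
 * definition fits together (the event name, prototype and fields here are
 * invented, not the ones this header actually declares):
 */
TRACE_EVENT(example_hif_event,
	TP_PROTO(const struct wfx_hif_msg *hif, int tx_fill_level),
	TP_ARGS(hif, tx_fill_level),
	TP_STRUCT__entry(
		__field(int, tx_fill_level)
		__field(int, msg_id)
	),
	TP_fast_assign(
		__entry->tx_fill_level = tx_fill_level;
		__entry->msg_id = hif->id;
	),
	TP_printk("%d: id=0x%02x", __entry->tx_fill_level, __entry->msg_id)
);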
234 #define wfx_reg_list wfx_reg_list_enum { -1, NULL }
237 TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
238 TP_ARGS(reg, addr, io_buf, len),
240 __field(int, reg)
241 __field(int, addr)
248 __entry->reg = reg;
249 __entry->addr = addr;
250 __entry->msg_len = len;
251 __entry->buf_len = min_t(int, sizeof(__entry->buf), __entry->msg_len);
252 memcpy(__entry->buf, io_buf, __entry->buf_len);
253 if (addr >= 0)
254 snprintf(__entry->addr_str, 10, "/%08x", addr);
255 else
256 __entry->addr_str[0] = 0;
259 __print_symbolic(__entry->reg, wfx_reg_list),
260 __entry->addr_str,
261 __print_hex(__entry->buf, __entry->buf_len),
262 __entry->msg_len > sizeof(__entry->buf) ? " ..." : "",
263 __entry->msg_len
267 TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
268 TP_ARGS(reg, addr, io_buf, len));
269 #define _trace_io_ind_write(reg, addr, io_buf, len)\
270 trace_io_write(reg, addr, io_buf, len)
271 #define _trace_io_write(reg, io_buf, len) trace_io_write(reg, -1, io_buf, len)
273 TP_PROTO(int reg, int addr, const void *io_buf, size_t len),
274 TP_ARGS(reg, addr, io_buf, len));
275 #define _trace_io_ind_read(reg, addr, io_buf, len)\
276 trace_io_read(reg, addr, io_buf, len)
277 #define _trace_io_read(reg, io_buf, len) trace_io_read(reg, -1, io_buf, len)
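/* Editor's note: a hypothetical call site for the helpers above; the function
 * name and the bus transfer are invented for illustration, only the
 * _trace_io_read()/_trace_io_ind_read() wrappers come from this header.
 */
static int wfx_example_read(int reg, void *buf, size_t len)
{
	int ret = 0;	/* imagine the SDIO/SPI transfer happening here */

	_trace_io_read(reg, buf, len);	/* direct access: no indirect address */
	return ret;
}
/* windowed/indirect accesses use the variant that also records the address:
 *	_trace_io_ind_read(reg, addr, buf, len);
 */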
280 TP_PROTO(int reg, int addr, u32 val),
281 TP_ARGS(reg, addr, val),
283 __field(int, reg)
284 __field(int, addr)
289 __entry->reg = reg;
290 __entry->addr = addr;
291 __entry->val = val;
292 if (addr >= 0)
293 snprintf(__entry->addr_str, 10, "/%08x", addr);
294 else
295 __entry->addr_str[0] = 0;
298 __print_symbolic(__entry->reg, wfx_reg_list),
299 __entry->addr_str,
300 __entry->val
304 TP_PROTO(int reg, int addr, u32 val),
305 TP_ARGS(reg, addr, val));
306 #define _trace_io_ind_write32(reg, addr, val) trace_io_write32(reg, addr, val)
307 #define _trace_io_write32(reg, val) trace_io_write32(reg, -1, val)
309 TP_PROTO(int reg, int addr, u32 val),
310 TP_ARGS(reg, addr, val));
311 #define _trace_io_ind_read32(reg, addr, val) trace_io_read32(reg, addr, val)
312 #define _trace_io_read32(reg, val) trace_io_read32(reg, -1, val)
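/* Editor's note: hypothetical usage of the 32-bit variants above; the helper
 * name is invented. A negative addr means "no indirect address", which is why
 * the direct wrappers pass -1 and why the "/%08x" suffix is only printed when
 * addr >= 0.
 */
static void wfx_example_write32(int reg, int addr, u32 val)
{
	if (addr >= 0)
		_trace_io_ind_write32(reg, addr, val);	/* logs reg, address and value */
	else
		_trace_io_write32(reg, val);		/* address suffix omitted */
}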
322 __entry->val = val;
323 __entry->ignored = ignored;
326 __entry->val,
327 __entry->ignored ? " (ignored)" : ""
346 __entry->ind = ind;
347 __entry->req = req;
348 __entry->cnf = cnf;
349 __entry->busy = busy;
350 __entry->release = release;
353 __entry->ind,
354 __entry->req,
355 __entry->cnf,
356 __entry->busy,
357 __entry->release ? "release" : "keep"
381 (const struct ieee80211_tx_info *)skb->cb;
382 const struct ieee80211_tx_rate *rates = tx_info->driver_rates;
385 __entry->pkt_id = tx_cnf->packet_id;
386 __entry->delay_media = le32_to_cpu(tx_cnf->media_delay);
387 __entry->delay_queue = le32_to_cpu(tx_cnf->tx_queue_delay);
388 __entry->delay_fw = delay;
389 __entry->ack_failures = tx_cnf->ack_failures;
390 if (!tx_cnf->status || __entry->ack_failures)
391 __entry->ack_failures += 1;
395 __entry->rate[i] = rates[i].idx;
397 __entry->rate[i] = hw_rate[rates[i].idx];
398 __entry->tx_count[i] = rates[i].count;
400 __entry->flags = 0;
402 __entry->flags |= 0x01;
404 __entry->flags |= 0x02;
406 __entry->flags |= 0x04;
408 __entry->flags |= 0x08;
409 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
410 __entry->flags |= 0x10;
411 if (tx_cnf->status)
412 __entry->flags |= 0x20;
413 if (tx_cnf->status == HIF_STATUS_TX_FAIL_REQUEUE)
414 __entry->flags |= 0x40;
416 …TP_printk("packet ID: %08x, rate policy: %s %d|%d %d|%d %d|%d %d|%d -> %d attempt, Delays media/qu…
417 __entry->pkt_id,
418 __print_flags(__entry->flags, NULL,
421 __entry->rate[0],
422 __entry->tx_count[0],
423 __entry->rate[1],
424 __entry->tx_count[1],
425 __entry->rate[2],
426 __entry->tx_count[2],
427 __entry->rate[3],
428 __entry->tx_count[3],
429 __entry->ack_failures,
430 __entry->delay_media,
431 __entry->delay_queue,
432 __entry->delay_fw
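/* Editor's note (worked example of the attempt count above): ack_failures is
 * incremented by one when the frame ultimately succeeded (status == 0) or when
 * at least one ACK was missed, so a frame acked on the first try reports
 * 0 + 1 = 1 attempt, a frame that missed two ACKs before succeeding reports
 * 2 + 1 = 3 attempts, and a frame that failed without any missed ACK stays at 0.
 */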
453 __entry->hw[j] = -1;
454 __entry->drv[j] = -1;
455 __entry->cab[j] = -1;
457 __entry->vif_id = -1;
458 __entry->queue_id = -1;
462 j = wvif->id * IEEE80211_NUM_ACS + i;
464 queue = &wvif->tx_queue[i];
465 __entry->hw[j] = atomic_read(&queue->pending_frames);
466 __entry->drv[j] = skb_queue_len(&queue->normal);
467 __entry->cab[j] = skb_queue_len(&queue->cab);
469 __entry->vif_id = wvif->id;
470 __entry->queue_id = i;
476 __entry->vif_id, __entry->queue_id,
477 __entry->hw[0], __entry->drv[0], __entry->cab[0],
478 __entry->hw[1], __entry->drv[1], __entry->cab[1],
479 __entry->hw[2], __entry->drv[2], __entry->cab[2],
480 __entry->hw[3], __entry->drv[3], __entry->cab[3],
481 __entry->hw[4], __entry->drv[4], __entry->cab[4],
482 __entry->hw[5], __entry->drv[5], __entry->cab[5],
483 __entry->hw[6], __entry->drv[6], __entry->cab[6],
484 __entry->hw[7], __entry->drv[7], __entry->cab[7]
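/* Editor's note: the eight hw/drv/cab columns in the TP_printk above follow the
 * indexing used in TP_fast_assign, j = vif_id * IEEE80211_NUM_ACS + ac; with
 * IEEE80211_NUM_ACS == 4 that is slots 0-3 for the first interface and 4-7 for
 * the second. A hypothetical helper making the mapping explicit:
 */
static inline int wfx_example_queue_slot(int vif_id, int ac)
{
	return vif_id * IEEE80211_NUM_ACS + ac;	/* e.g. vif 1, AC 2 -> slot 6 */
}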