Lines Matching refs:pkg
146 tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg) in tb_cfg_request_find() argument
151 mutex_lock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
152 list_for_each_entry(req, &pkg->ctl->request_queue, list) { in tb_cfg_request_find()
154 if (req->match(req, pkg)) { in tb_cfg_request_find()
160 mutex_unlock(&pkg->ctl->request_queue_lock); in tb_cfg_request_find()
168 static int check_header(const struct ctl_pkg *pkg, u32 len, in check_header() argument
171 struct tb_cfg_header *header = pkg->buffer; in check_header()
174 if (WARN(len != pkg->frame.size, in check_header()
176 len, pkg->frame.size)) in check_header()
178 if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n", in check_header()
179 type, pkg->frame.eof)) in check_header()
181 if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n", in check_header()
182 pkg->frame.sof)) in check_header()
220 struct cfg_error_pkg *pkg = response->buffer; in decode_error() local
222 res.response_route = tb_cfg_get_route(&pkg->header); in decode_error()
224 res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR, in decode_error()
225 tb_cfg_get_route(&pkg->header)); in decode_error()
229 WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1); in decode_error()
230 WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2); in decode_error()
231 WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3); in decode_error()
233 res.tb_error = pkg->error; in decode_error()
234 res.response_port = pkg->port; in decode_error()
239 static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len, in parse_header() argument
242 struct tb_cfg_header *header = pkg->buffer; in parse_header()
245 if (pkg->frame.eof == TB_CFG_PKG_ERROR) in parse_header()
246 return decode_error(pkg); in parse_header()
250 res.err = check_header(pkg, len, type, route); in parse_header()
298 static void tb_ctl_pkg_free(struct ctl_pkg *pkg) in tb_ctl_pkg_free() argument
300 if (pkg) { in tb_ctl_pkg_free()
301 dma_pool_free(pkg->ctl->frame_pool, in tb_ctl_pkg_free()
302 pkg->buffer, pkg->frame.buffer_phy); in tb_ctl_pkg_free()
303 kfree(pkg); in tb_ctl_pkg_free()
309 struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL); in tb_ctl_pkg_alloc() local
310 if (!pkg) in tb_ctl_pkg_alloc()
312 pkg->ctl = ctl; in tb_ctl_pkg_alloc()
313 pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL, in tb_ctl_pkg_alloc()
314 &pkg->frame.buffer_phy); in tb_ctl_pkg_alloc()
315 if (!pkg->buffer) { in tb_ctl_pkg_alloc()
316 kfree(pkg); in tb_ctl_pkg_alloc()
319 return pkg; in tb_ctl_pkg_alloc()
328 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); in tb_ctl_tx_callback() local
329 tb_ctl_pkg_free(pkg); in tb_ctl_tx_callback()
343 struct ctl_pkg *pkg; in tb_ctl_tx() local
353 pkg = tb_ctl_pkg_alloc(ctl); in tb_ctl_tx()
354 if (!pkg) in tb_ctl_tx()
356 pkg->frame.callback = tb_ctl_tx_callback; in tb_ctl_tx()
357 pkg->frame.size = len + 4; in tb_ctl_tx()
358 pkg->frame.sof = type; in tb_ctl_tx()
359 pkg->frame.eof = type; in tb_ctl_tx()
360 cpu_to_be32_array(pkg->buffer, data, len / 4); in tb_ctl_tx()
361 *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len); in tb_ctl_tx()
363 res = tb_ring_tx(ctl->tx, &pkg->frame); in tb_ctl_tx()
365 tb_ctl_pkg_free(pkg); in tb_ctl_tx()
373 struct ctl_pkg *pkg, size_t size) in tb_ctl_handle_event() argument
375 return ctl->callback(ctl->callback_data, type, pkg->buffer, size); in tb_ctl_handle_event()
378 static void tb_ctl_rx_submit(struct ctl_pkg *pkg) in tb_ctl_rx_submit() argument
380 tb_ring_rx(pkg->ctl->rx, &pkg->frame); /* in tb_ctl_rx_submit()
388 static int tb_async_error(const struct ctl_pkg *pkg) in tb_async_error() argument
390 const struct cfg_error_pkg *error = pkg->buffer; in tb_async_error()
392 if (pkg->frame.eof != TB_CFG_PKG_ERROR) in tb_async_error()
409 struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); in tb_ctl_rx_callback() local
420 tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", in tb_ctl_rx_callback()
426 crc32 = tb_crc(pkg->buffer, frame->size); in tb_ctl_rx_callback()
427 be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4); in tb_ctl_rx_callback()
435 if (*(__be32 *)(pkg->buffer + frame->size) != crc32) { in tb_ctl_rx_callback()
436 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
440 if (tb_async_error(pkg)) { in tb_ctl_rx_callback()
441 tb_ctl_handle_event(pkg->ctl, frame->eof, in tb_ctl_rx_callback()
442 pkg, frame->size); in tb_ctl_rx_callback()
450 if (*(__be32 *)(pkg->buffer + frame->size) != crc32) { in tb_ctl_rx_callback()
451 tb_ctl_err(pkg->ctl, in tb_ctl_rx_callback()
457 if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) in tb_ctl_rx_callback()
471 req = tb_cfg_request_find(pkg->ctl, pkg); in tb_ctl_rx_callback()
473 if (req->copy(req, pkg)) in tb_ctl_rx_callback()
479 tb_ctl_rx_submit(pkg); in tb_ctl_rx_callback()
718 struct cfg_error_pkg pkg = { in tb_cfg_error() local
724 return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); in tb_cfg_error()
728 const struct ctl_pkg *pkg) in tb_cfg_match() argument
730 u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63); in tb_cfg_match()
732 if (pkg->frame.eof == TB_CFG_PKG_ERROR) in tb_cfg_match()
735 if (pkg->frame.eof != req->response_type) in tb_cfg_match()
739 if (pkg->frame.size != req->response_size) in tb_cfg_match()
742 if (pkg->frame.eof == TB_CFG_PKG_READ || in tb_cfg_match()
743 pkg->frame.eof == TB_CFG_PKG_WRITE) { in tb_cfg_match()
745 const struct cfg_read_pkg *res_hdr = pkg->buffer; in tb_cfg_match()
754 static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) in tb_cfg_copy() argument
759 res = parse_header(pkg, req->response_size, req->response_type, in tb_cfg_copy()
762 memcpy(req->response, pkg->buffer, req->response_size); in tb_cfg_copy()