Lines Matching +full:pmsg +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (c) 2003-2016, Intel Corporation.
12 #include <linux/dma-mapping.h>
20 int size; in ishtp_cl_get_tx_free_buffer_size() local
22 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); in ishtp_cl_get_tx_free_buffer_size()
23 size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length; in ishtp_cl_get_tx_free_buffer_size()
24 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); in ishtp_cl_get_tx_free_buffer_size()
26 return size; in ishtp_cl_get_tx_free_buffer_size()
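
For orientation: the value computed above is just the number of free TX ring entries times the firmware client's maximum message length. A minimal worked example with purely illustrative numbers (not taken from the driver):

/* Illustrative arithmetic only; 16 entries and a 4 KB max message are hypothetical. */
static int example_tx_free_bytes(void)
{
        int tx_ring_free_size = 16;     /* free entries on cl->tx_free_list */
        int max_msg_length = 4096;      /* fw_client->props.max_msg_length */

        return tx_ring_free_size * max_msg_length;      /* 65536 bytes reported free */
}
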
32 return cl->tx_ring_free_size; in ishtp_cl_get_tx_free_rings()
37 * ishtp_read_list_flush() - Flush read queue
48 spin_lock_irqsave(&cl->dev->read_list_spinlock, flags); in ishtp_read_list_flush()
49 list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list) in ishtp_read_list_flush()
50 if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) { in ishtp_read_list_flush()
51 list_del(&rb->list); in ishtp_read_list_flush()
54 spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags); in ishtp_read_list_flush()
58 * ishtp_cl_flush_queues() - Flush all queues for a client
64 * Return: 0 on success else -EINVAL if device is NULL
68 if (WARN_ON(!cl || !cl->dev)) in ishtp_cl_flush_queues()
69 return -EINVAL; in ishtp_cl_flush_queues()
78 * ishtp_cl_init() - Initialize all fields of a client device
88 init_waitqueue_head(&cl->wait_ctrl_res); in ishtp_cl_init()
89 spin_lock_init(&cl->free_list_spinlock); in ishtp_cl_init()
90 spin_lock_init(&cl->in_process_spinlock); in ishtp_cl_init()
91 spin_lock_init(&cl->tx_list_spinlock); in ishtp_cl_init()
92 spin_lock_init(&cl->tx_free_list_spinlock); in ishtp_cl_init()
93 spin_lock_init(&cl->fc_spinlock); in ishtp_cl_init()
94 INIT_LIST_HEAD(&cl->link); in ishtp_cl_init()
95 cl->dev = dev; in ishtp_cl_init()
97 INIT_LIST_HEAD(&cl->free_rb_list.list); in ishtp_cl_init()
98 INIT_LIST_HEAD(&cl->tx_list.list); in ishtp_cl_init()
99 INIT_LIST_HEAD(&cl->tx_free_list.list); in ishtp_cl_init()
100 INIT_LIST_HEAD(&cl->in_process_list.list); in ishtp_cl_init()
102 cl->rx_ring_size = CL_DEF_RX_RING_SIZE; in ishtp_cl_init()
103 cl->tx_ring_size = CL_DEF_TX_RING_SIZE; in ishtp_cl_init()
104 cl->tx_ring_free_size = cl->tx_ring_size; in ishtp_cl_init()
107 cl->last_tx_path = CL_TX_PATH_IPC; in ishtp_cl_init()
108 cl->last_dma_acked = 1; in ishtp_cl_init()
109 cl->last_dma_addr = NULL; in ishtp_cl_init()
110 cl->last_ipc_acked = 1; in ishtp_cl_init()
114 * ishtp_cl_allocate() - allocates client structure and sets it up.
129 ishtp_cl_init(cl, cl_device->ishtp_dev); in ishtp_cl_allocate()
135 * ishtp_cl_free() - Frees a client device
148 dev = cl->dev; in ishtp_cl_free()
152 spin_lock_irqsave(&dev->cl_list_lock, flags); in ishtp_cl_free()
156 spin_unlock_irqrestore(&dev->cl_list_lock, flags); in ishtp_cl_free()
161 * ishtp_cl_link() - Reserve a host id and link the client instance
177 if (WARN_ON(!cl || !cl->dev)) in ishtp_cl_link()
178 return -EINVAL; in ishtp_cl_link()
180 dev = cl->dev; in ishtp_cl_link()
182 spin_lock_irqsave(&dev->device_lock, flags); in ishtp_cl_link()
184 if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) { in ishtp_cl_link()
185 ret = -EMFILE; in ishtp_cl_link()
189 id = find_first_zero_bit(dev->host_clients_map, ISHTP_CLIENTS_MAX); in ishtp_cl_link()
192 spin_unlock_irqrestore(&dev->device_lock, flags); in ishtp_cl_link()
193 dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX); in ishtp_cl_link()
194 return -ENOENT; in ishtp_cl_link()
197 dev->open_handle_count++; in ishtp_cl_link()
198 cl->host_client_id = id; in ishtp_cl_link()
199 spin_lock_irqsave(&dev->cl_list_lock, flags_cl); in ishtp_cl_link()
200 if (dev->dev_state != ISHTP_DEV_ENABLED) { in ishtp_cl_link()
201 ret = -ENODEV; in ishtp_cl_link()
204 list_add_tail(&cl->link, &dev->cl_list); in ishtp_cl_link()
205 set_bit(id, dev->host_clients_map); in ishtp_cl_link()
206 cl->state = ISHTP_CL_INITIALIZING; in ishtp_cl_link()
209 spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl); in ishtp_cl_link()
211 spin_unlock_irqrestore(&dev->device_lock, flags); in ishtp_cl_link()
217 * ishtp_cl_unlink() - remove fw_cl from the client device list
229 if (!cl || !cl->dev) in ishtp_cl_unlink()
232 dev = cl->dev; in ishtp_cl_unlink()
234 spin_lock_irqsave(&dev->device_lock, flags); in ishtp_cl_unlink()
235 if (dev->open_handle_count > 0) { in ishtp_cl_unlink()
236 clear_bit(cl->host_client_id, dev->host_clients_map); in ishtp_cl_unlink()
237 dev->open_handle_count--; in ishtp_cl_unlink()
239 spin_unlock_irqrestore(&dev->device_lock, flags); in ishtp_cl_unlink()
245 spin_lock_irqsave(&dev->cl_list_lock, flags); in ishtp_cl_unlink()
246 list_for_each_entry(pos, &dev->cl_list, link) in ishtp_cl_unlink()
247 if (cl->host_client_id == pos->host_client_id) { in ishtp_cl_unlink()
248 list_del_init(&pos->link); in ishtp_cl_unlink()
251 spin_unlock_irqrestore(&dev->cl_list_lock, flags); in ishtp_cl_unlink()
256 * ishtp_cl_disconnect() - Send disconnect request to firmware
268 if (WARN_ON(!cl || !cl->dev)) in ishtp_cl_disconnect()
269 return -ENODEV; in ishtp_cl_disconnect()
271 dev = cl->dev; in ishtp_cl_disconnect()
273 dev->print_log(dev, "%s() state %d\n", __func__, cl->state); in ishtp_cl_disconnect()
275 if (cl->state != ISHTP_CL_DISCONNECTING) { in ishtp_cl_disconnect()
276 dev->print_log(dev, "%s() Disconnect in progress\n", __func__); in ishtp_cl_disconnect()
281 dev->print_log(dev, "%s() Failed to disconnect\n", __func__); in ishtp_cl_disconnect()
282 dev_err(&cl->device->dev, "failed to disconnect.\n"); in ishtp_cl_disconnect()
283 return -ENODEV; in ishtp_cl_disconnect()
286 wait_event_interruptible_timeout(cl->wait_ctrl_res, in ishtp_cl_disconnect()
287 (dev->dev_state != ISHTP_DEV_ENABLED || in ishtp_cl_disconnect()
288 cl->state == ISHTP_CL_DISCONNECTED), in ishtp_cl_disconnect()
292 * If FW reset arrived, this will happen. Don't check cl->, in ishtp_cl_disconnect()
295 if (dev->dev_state != ISHTP_DEV_ENABLED) { in ishtp_cl_disconnect()
296 dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n", in ishtp_cl_disconnect()
298 return -ENODEV; in ishtp_cl_disconnect()
301 if (cl->state == ISHTP_CL_DISCONNECTED) { in ishtp_cl_disconnect()
302 dev->print_log(dev, "%s() successful\n", __func__); in ishtp_cl_disconnect()
306 return -ENODEV; in ishtp_cl_disconnect()
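
The disconnect path above is normally the first step of a client driver's teardown; the remaining cleanup uses the unlink, flush and free helpers listed earlier. A minimal sketch of that ordering, modelled on the in-tree ISHTP clients and assuming the declarations from linux/intel-ish-client-if.h (error handling omitted):

/* Sketch only: typical client teardown, mirroring the bring-up sequence. */
static void example_client_teardown(struct ishtp_cl *cl)
{
        ishtp_set_connection_state(cl, ISHTP_CL_DISCONNECTING);
        ishtp_cl_disconnect(cl);        /* send the disconnect request and wait for the result */
        ishtp_cl_unlink(cl);            /* release the reserved host client id */
        ishtp_cl_flush_queues(cl);      /* drop this client's pending read buffers */
        ishtp_cl_free(cl);              /* free rings and the client structure */
}
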
311 * ishtp_cl_is_other_connecting() - Check other client is connecting
324 if (WARN_ON(!cl || !cl->dev)) in ishtp_cl_is_other_connecting()
327 dev = cl->dev; in ishtp_cl_is_other_connecting()
328 spin_lock_irqsave(&dev->cl_list_lock, flags); in ishtp_cl_is_other_connecting()
329 list_for_each_entry(pos, &dev->cl_list, link) { in ishtp_cl_is_other_connecting()
330 if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) && in ishtp_cl_is_other_connecting()
331 cl->fw_client_id == pos->fw_client_id) { in ishtp_cl_is_other_connecting()
332 spin_unlock_irqrestore(&dev->cl_list_lock, flags); in ishtp_cl_is_other_connecting()
336 spin_unlock_irqrestore(&dev->cl_list_lock, flags); in ishtp_cl_is_other_connecting()
342 * ishtp_cl_connect() - Send connect request to firmware
356 if (WARN_ON(!cl || !cl->dev)) in ishtp_cl_connect()
357 return -ENODEV; in ishtp_cl_connect()
359 dev = cl->dev; in ishtp_cl_connect()
361 dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state); in ishtp_cl_connect()
364 dev->print_log(dev, "%s() Busy\n", __func__); in ishtp_cl_connect()
365 return -EBUSY; in ishtp_cl_connect()
369 dev->print_log(dev, "%s() HBM connect req fail\n", __func__); in ishtp_cl_connect()
370 return -ENODEV; in ishtp_cl_connect()
373 rets = wait_event_interruptible_timeout(cl->wait_ctrl_res, in ishtp_cl_connect()
374 (dev->dev_state == ISHTP_DEV_ENABLED && in ishtp_cl_connect()
375 (cl->state == ISHTP_CL_CONNECTED || in ishtp_cl_connect()
376 cl->state == ISHTP_CL_DISCONNECTED)), in ishtp_cl_connect()
380 * If FW reset arrived, this will happen. Don't check cl->, in ishtp_cl_connect()
383 if (dev->dev_state != ISHTP_DEV_ENABLED) { in ishtp_cl_connect()
384 dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n", in ishtp_cl_connect()
386 return -EFAULT; in ishtp_cl_connect()
389 if (cl->state != ISHTP_CL_CONNECTED) { in ishtp_cl_connect()
390 dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n", in ishtp_cl_connect()
392 return -EFAULT; in ishtp_cl_connect()
395 rets = cl->status; in ishtp_cl_connect()
397 dev->print_log(dev, "%s() Invalid status\n", __func__); in ishtp_cl_connect()
403 dev->print_log(dev, "%s() Bind error\n", __func__); in ishtp_cl_connect()
410 dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__); in ishtp_cl_connect()
418 dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__); in ishtp_cl_connect()
425 /* Upon successful connection and allocation, emit flow-control */ in ishtp_cl_connect()
428 dev->print_log(dev, "%s() successful\n", __func__); in ishtp_cl_connect()
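
The connect path above is normally driven from an ISHTP bus driver's probe routine. A minimal bring-up sketch, modelled on the in-tree clients and assuming the public helpers from linux/intel-ish-client-if.h (ishtp_cl_allocate(), ishtp_cl_link(), ishtp_get_fw_client_id(), ...); the firmware client would usually be looked up by GUID via ishtp_fw_cl_get_client(), which is omitted here:

/* Sketch only: allocate, link and connect a client before any I/O. */
static int example_client_bringup(struct ishtp_cl_device *cl_device,
                                  struct ishtp_fw_client *fw_client)
{
        struct ishtp_cl *cl;
        int rv;

        cl = ishtp_cl_allocate(cl_device);      /* allocate + ishtp_cl_init() */
        if (!cl)
                return -ENOMEM;

        rv = ishtp_cl_link(cl);                 /* reserve a host client id */
        if (rv)
                goto out_free;

        /* Bind to the firmware client and request the connection. */
        ishtp_cl_set_fw_client_id(cl, ishtp_get_fw_client_id(fw_client));
        ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

        rv = ishtp_cl_connect(cl);              /* also allocates the RX/TX rings */
        if (rv)
                goto out_unlink;

        return 0;

out_unlink:
        ishtp_cl_unlink(cl);
out_free:
        ishtp_cl_free(cl);
        return rv;
}
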
435 * ishtp_cl_read_start() - Prepare to read client message
453 if (WARN_ON(!cl || !cl->dev)) in ishtp_cl_read_start()
454 return -ENODEV; in ishtp_cl_read_start()
456 dev = cl->dev; in ishtp_cl_read_start()
458 if (cl->state != ISHTP_CL_CONNECTED) in ishtp_cl_read_start()
459 return -ENODEV; in ishtp_cl_read_start()
461 if (dev->dev_state != ISHTP_DEV_ENABLED) in ishtp_cl_read_start()
462 return -ENODEV; in ishtp_cl_read_start()
464 i = ishtp_fw_cl_by_id(dev, cl->fw_client_id); in ishtp_cl_read_start()
466 dev_err(&cl->device->dev, "no such fw client %d\n", in ishtp_cl_read_start()
467 cl->fw_client_id); in ishtp_cl_read_start()
468 return -ENODEV; in ishtp_cl_read_start()
472 spin_lock_irqsave(&cl->free_list_spinlock, flags); in ishtp_cl_read_start()
473 if (list_empty(&cl->free_rb_list.list)) { in ishtp_cl_read_start()
474 dev_warn(&cl->device->dev, in ishtp_cl_read_start()
475 "[ishtp-ish] Rx buffers pool is empty\n"); in ishtp_cl_read_start()
476 rets = -ENOMEM; in ishtp_cl_read_start()
478 spin_unlock_irqrestore(&cl->free_list_spinlock, flags); in ishtp_cl_read_start()
481 rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list); in ishtp_cl_read_start()
482 list_del_init(&rb->list); in ishtp_cl_read_start()
483 spin_unlock_irqrestore(&cl->free_list_spinlock, flags); in ishtp_cl_read_start()
485 rb->cl = cl; in ishtp_cl_read_start()
486 rb->buf_idx = 0; in ishtp_cl_read_start()
488 INIT_LIST_HEAD(&rb->list); in ishtp_cl_read_start()
492 * This must be BEFORE sending flow control - in ishtp_cl_read_start()
495 spin_lock_irqsave(&dev->read_list_spinlock, dev_flags); in ishtp_cl_read_start()
496 list_add_tail(&rb->list, &dev->read_list.list); in ishtp_cl_read_start()
497 spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags); in ishtp_cl_read_start()
499 rets = -ENODEV; in ishtp_cl_read_start()
505 spin_lock_irqsave(&dev->read_list_spinlock, dev_flags); in ishtp_cl_read_start()
506 list_del(&rb->list); in ishtp_cl_read_start()
507 spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags); in ishtp_cl_read_start()
509 spin_lock_irqsave(&cl->free_list_spinlock, flags); in ishtp_cl_read_start()
510 list_add_tail(&rb->list, &cl->free_rb_list.list); in ishtp_cl_read_start()
511 spin_unlock_irqrestore(&cl->free_list_spinlock, flags); in ishtp_cl_read_start()
517 * ishtp_cl_send() - Send a message to firmware
536 if (WARN_ON(!cl || !cl->dev)) in ishtp_cl_send()
537 return -ENODEV; in ishtp_cl_send()
539 dev = cl->dev; in ishtp_cl_send()
541 if (cl->state != ISHTP_CL_CONNECTED) { in ishtp_cl_send()
542 ++cl->err_send_msg; in ishtp_cl_send()
543 return -EPIPE; in ishtp_cl_send()
546 if (dev->dev_state != ISHTP_DEV_ENABLED) { in ishtp_cl_send()
547 ++cl->err_send_msg; in ishtp_cl_send()
548 return -ENODEV; in ishtp_cl_send()
552 id = ishtp_fw_cl_by_id(dev, cl->fw_client_id); in ishtp_cl_send()
554 ++cl->err_send_msg; in ishtp_cl_send()
555 return -ENOENT; in ishtp_cl_send()
558 if (length > dev->fw_clients[id].props.max_msg_length) { in ishtp_cl_send()
559 ++cl->err_send_msg; in ishtp_cl_send()
560 return -EMSGSIZE; in ishtp_cl_send()
564 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); in ishtp_cl_send()
565 if (list_empty(&cl->tx_free_list.list)) { in ishtp_cl_send()
566 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, in ishtp_cl_send()
568 ++cl->err_send_msg; in ishtp_cl_send()
569 return -ENOMEM; in ishtp_cl_send()
572 cl_msg = list_first_entry(&cl->tx_free_list.list, in ishtp_cl_send()
574 if (!cl_msg->send_buf.data) { in ishtp_cl_send()
575 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, in ishtp_cl_send()
577 return -EIO; in ishtp_cl_send()
578 /* Should not happen, as free list is pre-allocated */ in ishtp_cl_send()
582 * max ISHTP message size per client in ishtp_cl_send()
584 list_del_init(&cl_msg->list); in ishtp_cl_send()
585 --cl->tx_ring_free_size; in ishtp_cl_send()
587 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); in ishtp_cl_send()
588 memcpy(cl_msg->send_buf.data, buf, length); in ishtp_cl_send()
589 cl_msg->send_buf.size = length; in ishtp_cl_send()
590 spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags); in ishtp_cl_send()
591 have_msg_to_send = !list_empty(&cl->tx_list.list); in ishtp_cl_send()
592 list_add_tail(&cl_msg->list, &cl->tx_list.list); in ishtp_cl_send()
593 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); in ishtp_cl_send()
595 if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0) in ishtp_cl_send()
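
From a client driver's perspective, ishtp_cl_send() only copies the payload into a free TX ring entry and lets the IPC/DMA paths drain it. A common pattern is to check ishtp_cl_get_tx_free_buffer_size() first so a large payload is not simply rejected with -ENOMEM. A minimal sketch (assumes linux/intel-ish-client-if.h; the retry policy is up to the caller):

/* Sketch only: queue an outgoing message, checking TX ring space first. */
static int example_send(struct ishtp_cl *cl, u8 *payload, size_t len)
{
        if (ishtp_cl_get_tx_free_buffer_size(cl) < (int)len)
                return -EBUSY;  /* caller may retry after a TX completion */

        return ishtp_cl_send(cl, payload, len); /* data is copied into a ring entry */
}
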
603 * ishtp_cl_read_complete() - read complete
613 struct ishtp_cl *cl = rb->cl; in ishtp_cl_read_complete()
615 spin_lock_irqsave(&cl->in_process_spinlock, flags); in ishtp_cl_read_complete()
617 * if in-process list is empty, then need to schedule in ishtp_cl_read_complete()
620 schedule_work_flag = list_empty(&cl->in_process_list.list); in ishtp_cl_read_complete()
621 list_add_tail(&rb->list, &cl->in_process_list.list); in ishtp_cl_read_complete()
622 spin_unlock_irqrestore(&cl->in_process_spinlock, flags); in ishtp_cl_read_complete()
625 ishtp_cl_bus_rx_event(cl->device); in ishtp_cl_read_complete()
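
ishtp_cl_read_complete() only moves the finished buffer onto the client's in-process list and signals the bus; consuming the data happens in the bus driver's RX event callback. A minimal sketch of such a callback, assuming the ishtp_get_drvdata(), ishtp_cl_rx_get_rb() and ishtp_cl_io_rb_recycle() helpers exported through linux/intel-ish-client-if.h:

/* Sketch only: drain completed read buffers from the in-process list. */
static void example_rx_event_cb(struct ishtp_cl_device *cl_device)
{
        struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);
        struct ishtp_cl_rb *rb;

        if (!cl)
                return;

        while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
                /* rb->buffer.data holds rb->buf_idx bytes of the completed message;
                 * process it here (driver-specific), then recycle the buffer so it
                 * returns to free_rb_list and read/flow-control is re-armed.
                 */
                ishtp_cl_io_rb_recycle(rb);
        }
}
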
629 * ipc_tx_send() - IPC tx send function
633 * if message size is bigger than IPC FIFO size, and all
641 struct ishtp_device *dev = (cl ? cl->dev : NULL); in ipc_tx_send()
644 unsigned char *pmsg; in ipc_tx_send() local
653 if (dev->dev_state != ISHTP_DEV_ENABLED) in ipc_tx_send()
656 if (cl->state != ISHTP_CL_CONNECTED) in ipc_tx_send()
659 spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags); in ipc_tx_send()
660 if (list_empty(&cl->tx_list.list)) { in ipc_tx_send()
661 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); in ipc_tx_send()
665 if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) { in ipc_tx_send()
666 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); in ipc_tx_send()
670 if (!cl->sending) { in ipc_tx_send()
671 --cl->ishtp_flow_ctrl_creds; in ipc_tx_send()
672 cl->last_ipc_acked = 0; in ipc_tx_send()
673 cl->last_tx_path = CL_TX_PATH_IPC; in ipc_tx_send()
674 cl->sending = 1; in ipc_tx_send()
677 cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring, in ipc_tx_send()
679 rem = cl_msg->send_buf.size - cl->tx_offs; in ipc_tx_send()
682 ishtp_hdr.host_addr = cl->host_client_id; in ipc_tx_send()
683 ishtp_hdr.fw_addr = cl->fw_client_id; in ipc_tx_send()
685 pmsg = cl_msg->send_buf.data + cl->tx_offs; in ipc_tx_send()
687 if (rem <= dev->mtu) { in ipc_tx_send()
692 ishtp_write_message(dev, &ishtp_hdr, pmsg); in ipc_tx_send()
693 cl->tx_offs = 0; in ipc_tx_send()
694 cl->sending = 0; in ipc_tx_send()
699 ishtp_hdr.length = dev->mtu; in ipc_tx_send()
702 ishtp_write_message(dev, &ishtp_hdr, pmsg); in ipc_tx_send()
703 cl->tx_offs += dev->mtu; in ipc_tx_send()
704 rem = cl_msg->send_buf.size - cl->tx_offs; in ipc_tx_send()
708 list_del_init(&cl_msg->list); in ipc_tx_send()
709 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); in ipc_tx_send()
711 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); in ipc_tx_send()
712 list_add_tail(&cl_msg->list, &cl->tx_free_list.list); in ipc_tx_send()
713 ++cl->tx_ring_free_size; in ipc_tx_send()
714 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, in ipc_tx_send()
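
The logic above fragments each queued message along dev->mtu boundaries and marks only the final fragment as complete. A short worked example with hypothetical numbers (an MTU of 128 bytes is illustrative, not read from the hardware):

/* Hypothetical fragmentation by ipc_tx_send():
 *   dev->mtu = 128, cl_msg->send_buf.size = 300
 *   fragment 1: tx_offs   0..127, ishtp_hdr.length = 128, msg_complete = 0
 *   fragment 2: tx_offs 128..255, ishtp_hdr.length = 128, msg_complete = 0
 *   fragment 3: tx_offs 256..299, ishtp_hdr.length =  44, msg_complete = 1 (rem <= mtu)
 * After the last fragment the ring entry moves back to tx_free_list.
 */
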
719 * ishtp_cl_send_msg_ipc() -Send message using IPC
729 if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0) in ishtp_cl_send_msg_ipc()
732 cl->tx_offs = 0; in ishtp_cl_send_msg_ipc()
734 ++cl->send_msg_cnt_ipc; in ishtp_cl_send_msg_ipc()
738 * ishtp_cl_send_msg_dma() -Send message using DMA
755 if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0) in ishtp_cl_send_msg_dma()
758 spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags); in ishtp_cl_send_msg_dma()
759 if (list_empty(&cl->tx_list.list)) { in ishtp_cl_send_msg_dma()
760 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); in ishtp_cl_send_msg_dma()
764 cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring, in ishtp_cl_send_msg_dma()
767 msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size); in ishtp_cl_send_msg_dma()
769 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); in ishtp_cl_send_msg_dma()
770 if (dev->transfer_path == CL_TX_PATH_DEFAULT) in ishtp_cl_send_msg_dma()
775 list_del_init(&cl_msg->list); /* Must be before write */ in ishtp_cl_send_msg_dma()
776 spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags); in ishtp_cl_send_msg_dma()
778 --cl->ishtp_flow_ctrl_creds; in ishtp_cl_send_msg_dma()
779 cl->last_dma_acked = 0; in ishtp_cl_send_msg_dma()
780 cl->last_dma_addr = msg_addr; in ishtp_cl_send_msg_dma()
781 cl->last_tx_path = CL_TX_PATH_DMA; in ishtp_cl_send_msg_dma()
784 memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size); in ishtp_cl_send_msg_dma()
790 if (dev->ops->dma_no_cache_snooping && in ishtp_cl_send_msg_dma()
791 dev->ops->dma_no_cache_snooping(dev)) in ishtp_cl_send_msg_dma()
792 clflush_cache_range(msg_addr, cl_msg->send_buf.size); in ishtp_cl_send_msg_dma()
795 off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf; in ishtp_cl_send_msg_dma()
798 dma_xfer.fw_client_id = cl->fw_client_id; in ishtp_cl_send_msg_dma()
799 dma_xfer.host_client_id = cl->host_client_id; in ishtp_cl_send_msg_dma()
801 dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off; in ishtp_cl_send_msg_dma()
802 dma_xfer.msg_length = cl_msg->send_buf.size; in ishtp_cl_send_msg_dma()
805 spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags); in ishtp_cl_send_msg_dma()
806 list_add_tail(&cl_msg->list, &cl->tx_free_list.list); in ishtp_cl_send_msg_dma()
807 ++cl->tx_ring_free_size; in ishtp_cl_send_msg_dma()
808 spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags); in ishtp_cl_send_msg_dma()
809 ++cl->send_msg_cnt_dma; in ishtp_cl_send_msg_dma()
813 * ishtp_cl_send_msg() -Send message using DMA or IPC
821 if (dev->transfer_path == CL_TX_PATH_DMA) in ishtp_cl_send_msg()
828 * recv_ishtp_cl_msg() -Receive client message
846 if (ishtp_hdr->reserved) { in recv_ishtp_cl_msg()
847 dev_err(dev->devc, "corrupted message header.\n"); in recv_ishtp_cl_msg()
851 if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) { in recv_ishtp_cl_msg()
852 dev_err(dev->devc, in recv_ishtp_cl_msg()
857 spin_lock_irqsave(&dev->read_list_spinlock, flags); in recv_ishtp_cl_msg()
858 rb_count = -1; in recv_ishtp_cl_msg()
859 list_for_each_entry(rb, &dev->read_list.list, list) { in recv_ishtp_cl_msg()
861 cl = rb->cl; in recv_ishtp_cl_msg()
862 if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr && in recv_ishtp_cl_msg()
863 cl->fw_client_id == ishtp_hdr->fw_addr) || in recv_ishtp_cl_msg()
864 !(cl->state == ISHTP_CL_CONNECTED)) in recv_ishtp_cl_msg()
868 if (rb->buffer.size == 0 || rb->buffer.data == NULL) { in recv_ishtp_cl_msg()
869 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); in recv_ishtp_cl_msg()
870 dev_err(&cl->device->dev, in recv_ishtp_cl_msg()
872 list_del(&rb->list); in recv_ishtp_cl_msg()
874 cl->status = -ENOMEM; in recv_ishtp_cl_msg()
880 * size, drop message and return to free buffer. in recv_ishtp_cl_msg()
884 if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) { in recv_ishtp_cl_msg()
885 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); in recv_ishtp_cl_msg()
886 dev_err(&cl->device->dev, in recv_ishtp_cl_msg()
887 "message overflow. size %d len %d idx %ld\n", in recv_ishtp_cl_msg()
888 rb->buffer.size, ishtp_hdr->length, in recv_ishtp_cl_msg()
889 rb->buf_idx); in recv_ishtp_cl_msg()
890 list_del(&rb->list); in recv_ishtp_cl_msg()
892 cl->status = -EIO; in recv_ishtp_cl_msg()
896 buffer = rb->buffer.data + rb->buf_idx; in recv_ishtp_cl_msg()
897 dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length); in recv_ishtp_cl_msg()
899 rb->buf_idx += ishtp_hdr->length; in recv_ishtp_cl_msg()
900 if (ishtp_hdr->msg_complete) { in recv_ishtp_cl_msg()
901 /* Last fragment in message - it's complete */ in recv_ishtp_cl_msg()
902 cl->status = 0; in recv_ishtp_cl_msg()
903 list_del(&rb->list); in recv_ishtp_cl_msg()
906 --cl->out_flow_ctrl_creds; in recv_ishtp_cl_msg()
911 spin_lock(&cl->free_list_spinlock); in recv_ishtp_cl_msg()
913 if (!list_empty(&cl->free_rb_list.list)) { in recv_ishtp_cl_msg()
914 new_rb = list_entry(cl->free_rb_list.list.next, in recv_ishtp_cl_msg()
916 list_del_init(&new_rb->list); in recv_ishtp_cl_msg()
917 spin_unlock(&cl->free_list_spinlock); in recv_ishtp_cl_msg()
918 new_rb->cl = cl; in recv_ishtp_cl_msg()
919 new_rb->buf_idx = 0; in recv_ishtp_cl_msg()
920 INIT_LIST_HEAD(&new_rb->list); in recv_ishtp_cl_msg()
921 list_add_tail(&new_rb->list, in recv_ishtp_cl_msg()
922 &dev->read_list.list); in recv_ishtp_cl_msg()
926 spin_unlock(&cl->free_list_spinlock); in recv_ishtp_cl_msg()
930 ++cl->recv_msg_num_frags; in recv_ishtp_cl_msg()
939 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); in recv_ishtp_cl_msg()
944 dev_err(dev->devc, "Dropped Rx msg - no request\n"); in recv_ishtp_cl_msg()
945 dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length); in recv_ishtp_cl_msg()
950 cl = complete_rb->cl; in recv_ishtp_cl_msg()
951 cl->ts_rx = ktime_get(); in recv_ishtp_cl_msg()
952 ++cl->recv_msg_cnt_ipc; in recv_ishtp_cl_msg()
960 * recv_ishtp_cl_msg_dma() -Receive client message
978 spin_lock_irqsave(&dev->read_list_spinlock, flags); in recv_ishtp_cl_msg_dma()
980 list_for_each_entry(rb, &dev->read_list.list, list) { in recv_ishtp_cl_msg_dma()
981 cl = rb->cl; in recv_ishtp_cl_msg_dma()
982 if (!cl || !(cl->host_client_id == hbm->host_client_id && in recv_ishtp_cl_msg_dma()
983 cl->fw_client_id == hbm->fw_client_id) || in recv_ishtp_cl_msg_dma()
984 !(cl->state == ISHTP_CL_CONNECTED)) in recv_ishtp_cl_msg_dma()
990 if (rb->buffer.size == 0 || rb->buffer.data == NULL) { in recv_ishtp_cl_msg_dma()
991 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); in recv_ishtp_cl_msg_dma()
992 dev_err(&cl->device->dev, in recv_ishtp_cl_msg_dma()
994 list_del(&rb->list); in recv_ishtp_cl_msg_dma()
996 cl->status = -ENOMEM; in recv_ishtp_cl_msg_dma()
1002 * size, drop message and return to free buffer. in recv_ishtp_cl_msg_dma()
1006 if (rb->buffer.size < hbm->msg_length) { in recv_ishtp_cl_msg_dma()
1007 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); in recv_ishtp_cl_msg_dma()
1008 dev_err(&cl->device->dev, in recv_ishtp_cl_msg_dma()
1009 "message overflow. size %d len %d idx %ld\n", in recv_ishtp_cl_msg_dma()
1010 rb->buffer.size, hbm->msg_length, rb->buf_idx); in recv_ishtp_cl_msg_dma()
1011 list_del(&rb->list); in recv_ishtp_cl_msg_dma()
1013 cl->status = -EIO; in recv_ishtp_cl_msg_dma()
1017 buffer = rb->buffer.data; in recv_ishtp_cl_msg_dma()
1023 if (dev->ops->dma_no_cache_snooping && in recv_ishtp_cl_msg_dma()
1024 dev->ops->dma_no_cache_snooping(dev)) in recv_ishtp_cl_msg_dma()
1025 clflush_cache_range(msg, hbm->msg_length); in recv_ishtp_cl_msg_dma()
1027 memcpy(buffer, msg, hbm->msg_length); in recv_ishtp_cl_msg_dma()
1028 rb->buf_idx = hbm->msg_length; in recv_ishtp_cl_msg_dma()
1030 /* Last fragment in message - it's complete */ in recv_ishtp_cl_msg_dma()
1031 cl->status = 0; in recv_ishtp_cl_msg_dma()
1032 list_del(&rb->list); in recv_ishtp_cl_msg_dma()
1035 --cl->out_flow_ctrl_creds; in recv_ishtp_cl_msg_dma()
1040 spin_lock(&cl->free_list_spinlock); in recv_ishtp_cl_msg_dma()
1042 if (!list_empty(&cl->free_rb_list.list)) { in recv_ishtp_cl_msg_dma()
1043 new_rb = list_entry(cl->free_rb_list.list.next, in recv_ishtp_cl_msg_dma()
1045 list_del_init(&new_rb->list); in recv_ishtp_cl_msg_dma()
1046 spin_unlock(&cl->free_list_spinlock); in recv_ishtp_cl_msg_dma()
1047 new_rb->cl = cl; in recv_ishtp_cl_msg_dma()
1048 new_rb->buf_idx = 0; in recv_ishtp_cl_msg_dma()
1049 INIT_LIST_HEAD(&new_rb->list); in recv_ishtp_cl_msg_dma()
1050 list_add_tail(&new_rb->list, in recv_ishtp_cl_msg_dma()
1051 &dev->read_list.list); in recv_ishtp_cl_msg_dma()
1055 spin_unlock(&cl->free_list_spinlock); in recv_ishtp_cl_msg_dma()
1059 ++cl->recv_msg_num_frags; in recv_ishtp_cl_msg_dma()
1068 spin_unlock_irqrestore(&dev->read_list_spinlock, flags); in recv_ishtp_cl_msg_dma()
1071 dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n"); in recv_ishtp_cl_msg_dma()
1076 cl = complete_rb->cl; in recv_ishtp_cl_msg_dma()
1077 cl->ts_rx = ktime_get(); in recv_ishtp_cl_msg_dma()
1078 ++cl->recv_msg_cnt_dma; in recv_ishtp_cl_msg_dma()
1087 return cl->client_data; in ishtp_get_client_data()
1093 cl->client_data = data; in ishtp_set_client_data()
1099 return cl->dev; in ishtp_get_ishtp_device()
1103 void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size) in ishtp_set_tx_ring_size() argument
1105 cl->tx_ring_size = size; in ishtp_set_tx_ring_size()
1109 void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size) in ishtp_set_rx_ring_size() argument
1111 cl->rx_ring_size = size; in ishtp_set_rx_ring_size()
1117 cl->state = state; in ishtp_set_connection_state()
1123 cl->fw_client_id = fw_client_id; in ishtp_cl_set_fw_client_id()
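
The ring-size setters above only record the requested values; they take effect when ishtp_cl_connect() later allocates the RX/TX rings, so they must run before the connect request. A minimal ordering sketch with illustrative sizes (defaults are CL_DEF_RX_RING_SIZE / CL_DEF_TX_RING_SIZE):

/* Sketch only: the sizes below are illustrative, not recommendations. */
static int example_configure_and_connect(struct ishtp_cl *cl)
{
        ishtp_set_rx_ring_size(cl, 32);         /* must precede ishtp_cl_connect() */
        ishtp_set_tx_ring_size(cl, 16);
        ishtp_set_connection_state(cl, ISHTP_CL_CONNECTING);

        return ishtp_cl_connect(cl);            /* rings allocated from the sizes above */
}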