Lines Matching +full:full +full:- +full:frame

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
47 * This structure is divided into two cache-aligned parts, the first is only
50 * lines, which is critical to performance and necessary in non-cache coherent
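
The split described in this comment can be pictured with the sketch below. It is an illustrative layout, not a copy of the driver's struct tegra_ivc_header; the 64-byte cache-line size (standing in for TEGRA_IVC_ALIGN) and the exact field grouping are assumptions drawn from the tx.count/tx.state/rx.count accesses visible further down in this listing.

#include <stdint.h>

/* Illustrative sketch only: two halves, each padded out to one cache line,
 * so fields written by the producer and fields written by the consumer
 * never share a cache line. */
#define CACHE_LINE 64				/* assumed cache-line size */

struct ivc_header_sketch {
	union {
		struct {
			uint32_t count;		/* frames written; updated only by the producer */
			uint32_t state;		/* producer's end of the reset state machine */
		};
		uint8_t pad[CACHE_LINE];
	} tx;

	union {
		struct {
			uint32_t count;		/* frames consumed; updated only by the consumer */
		};
		uint8_t pad[CACHE_LINE];
	} rx;
};
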
73 if (!ivc->peer) in tegra_ivc_invalidate()
76 dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN, in tegra_ivc_invalidate()
82 if (!ivc->peer) in tegra_ivc_flush()
85 dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN, in tegra_ivc_flush()
97 u32 tx = READ_ONCE(header->tx.count); in tegra_ivc_empty()
98 u32 rx = READ_ONCE(header->rx.count); in tegra_ivc_empty()
101 * Perform an over-full check to prevent denial of service attacks in tegra_ivc_empty()
104 * expected to check for full or over-full conditions. in tegra_ivc_empty()
110 if (tx - rx > ivc->num_frames) in tegra_ivc_empty()
119 u32 tx = READ_ONCE(header->tx.count); in tegra_ivc_full()
120 u32 rx = READ_ONCE(header->rx.count); in tegra_ivc_full()
124 * capacity also appear full. in tegra_ivc_full()
126 return tx - rx >= ivc->num_frames; in tegra_ivc_full()
132 u32 tx = READ_ONCE(header->tx.count); in tegra_ivc_available()
133 u32 rx = READ_ONCE(header->rx.count); in tegra_ivc_available()
137 * over-full situation can lead to denial of service attacks. See the in tegra_ivc_available()
139 * over-full considerations. in tegra_ivc_available()
141 return tx - rx; in tegra_ivc_available()
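
The three helpers above (tegra_ivc_empty, tegra_ivc_full, tegra_ivc_available) all reduce to the same unsigned-counter arithmetic. A minimal self-contained sketch, assuming free-running 32-bit counters whose unsigned subtraction stays correct across wraparound; the sketch_* names are illustrative, not driver functions:

#include <stdint.h>
#include <stdbool.h>

/* Sketch of the occupancy math: both counters only ever increase, and the
 * unsigned difference tx - rx is the number of frames currently queued. */
static bool sketch_empty(uint32_t tx, uint32_t rx, uint32_t num_frames)
{
	/* An impossible (over-full) count reads as empty, so a misbehaving
	 * peer cannot trick the consumer into seeing a huge backlog. */
	if (tx - rx > num_frames)
		return true;

	return tx == rx;
}

static bool sketch_full(uint32_t tx, uint32_t rx, uint32_t num_frames)
{
	/* Over-full also reads as full, so the producer stops writing. */
	return tx - rx >= num_frames;
}

static uint32_t sketch_available(uint32_t tx, uint32_t rx)
{
	return tx - rx;
}
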
146 WRITE_ONCE(ivc->tx.channel->tx.count, in tegra_ivc_advance_tx()
147 READ_ONCE(ivc->tx.channel->tx.count) + 1); in tegra_ivc_advance_tx()
149 if (ivc->tx.position == ivc->num_frames - 1) in tegra_ivc_advance_tx()
150 ivc->tx.position = 0; in tegra_ivc_advance_tx()
152 ivc->tx.position++; in tegra_ivc_advance_tx()
157 WRITE_ONCE(ivc->rx.channel->rx.count, in tegra_ivc_advance_rx()
158 READ_ONCE(ivc->rx.channel->rx.count) + 1); in tegra_ivc_advance_rx()
160 if (ivc->rx.position == ivc->num_frames - 1) in tegra_ivc_advance_rx()
161 ivc->rx.position = 0; in tegra_ivc_advance_rx()
163 ivc->rx.position++; in tegra_ivc_advance_rx()
171 * tx.channel->state is set locally, so it is not synchronized with in tegra_ivc_check_read()
175 * asynchronous transition of rx.channel->state to in tegra_ivc_check_read()
178 if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) in tegra_ivc_check_read()
179 return -ECONNRESET; in tegra_ivc_check_read()
186 * empty or full. in tegra_ivc_check_read()
188 if (!tegra_ivc_empty(ivc, ivc->rx.channel)) in tegra_ivc_check_read()
191 tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); in tegra_ivc_check_read()
193 if (tegra_ivc_empty(ivc, ivc->rx.channel)) in tegra_ivc_check_read()
194 return -ENOSPC; in tegra_ivc_check_read()
203 if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) in tegra_ivc_check_write()
204 return -ECONNRESET; in tegra_ivc_check_write()
206 if (!tegra_ivc_full(ivc, ivc->tx.channel)) in tegra_ivc_check_write()
209 tegra_ivc_invalidate(ivc, ivc->tx.phys + offset); in tegra_ivc_check_write()
211 if (tegra_ivc_full(ivc, ivc->tx.channel)) in tegra_ivc_check_write()
212 return -ENOSPC; in tegra_ivc_check_write()
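
Both check helpers follow the same shape: test the locally cached view first, and only if that says no progress is possible, invalidate the shared counters and test again before giving up. A toy model of that pattern; every name here is a placeholder, and toy_invalidate() merely stands in for dma_sync_single_for_cpu():

#include <errno.h>
#include <stdbool.h>

/* Toy model: only pay for a cache invalidate once the locally visible
 * counters claim that no progress is possible. */
struct toy_channel {
	volatile unsigned int tx_count;	/* advanced by the remote producer */
	unsigned int rx_count;		/* advanced by the local consumer */
};

static void toy_invalidate(struct toy_channel *ch)
{
	/* Placeholder for dma_sync_single_for_cpu(): on a non-coherent
	 * system this is where the CPU's stale view would be refreshed. */
	(void)ch;
}

static int toy_check_read(struct toy_channel *ch)
{
	if (ch->tx_count != ch->rx_count)
		return 0;		/* fast path: data already visible */

	toy_invalidate(ch);		/* the producer may have advanced meanwhile */

	if (ch->tx_count == ch->rx_count)
		return -ENOSPC;		/* genuinely empty */

	return 0;
}
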
219 unsigned int frame) in tegra_ivc_frame_virt() argument
221 if (WARN_ON(frame >= ivc->num_frames)) in tegra_ivc_frame_virt()
222 return ERR_PTR(-EINVAL); in tegra_ivc_frame_virt()
224 return (void *)(header + 1) + ivc->frame_size * frame; in tegra_ivc_frame_virt()
229 unsigned int frame) in tegra_ivc_frame_phys() argument
233 offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame; in tegra_ivc_frame_phys()
240 unsigned int frame, in tegra_ivc_invalidate_frame() argument
244 if (!ivc->peer || WARN_ON(frame >= ivc->num_frames)) in tegra_ivc_invalidate_frame()
247 phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset; in tegra_ivc_invalidate_frame()
249 dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE); in tegra_ivc_invalidate_frame()
254 unsigned int frame, in tegra_ivc_flush_frame() argument
258 if (!ivc->peer || WARN_ON(frame >= ivc->num_frames)) in tegra_ivc_flush_frame()
261 phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset; in tegra_ivc_flush_frame()
263 dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE); in tegra_ivc_flush_frame()
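
tegra_ivc_frame_virt() and tegra_ivc_frame_phys() rely on frames being packed directly behind the header, so locating a frame is plain offset arithmetic. A worked example with assumed sizes (the 128-byte header and 256-byte frame are illustrative, not taken from the driver):

#include <stdio.h>
#include <stddef.h>

/* Worked example of the offset math: frame N starts at
 * sizeof(header) + N * frame_size within the queue. */
int main(void)
{
	size_t header_size = 128;	/* assumed: two 64-byte halves */
	size_t frame_size = 256;	/* illustrative */
	unsigned int frame = 3;

	size_t offset = header_size + frame_size * frame;
	printf("frame %u starts at byte offset %zu\n", frame, offset);	/* 896 */
	return 0;
}
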
266 /* directly peek at the next frame rx'ed */
272 return ERR_PTR(-EINVAL); in tegra_ivc_read_get_next_frame()
279 * Order observation of ivc->rx.position potentially indicating new in tegra_ivc_read_get_next_frame()
284 tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0, in tegra_ivc_read_get_next_frame()
285 ivc->frame_size); in tegra_ivc_read_get_next_frame()
287 return tegra_ivc_frame_virt(ivc, ivc->rx.channel, ivc->rx.position); in tegra_ivc_read_get_next_frame()
299 * have already observed the channel non-empty. This check is just to in tegra_ivc_read_advance()
308 tegra_ivc_flush(ivc, ivc->rx.phys + rx); in tegra_ivc_read_advance()
311 * Ensure our write to ivc->rx.position occurs before our read from in tegra_ivc_read_advance()
312 * ivc->tx.position. in tegra_ivc_read_advance()
317 * Notify only upon transition from full to non-full. The available in tegra_ivc_read_advance()
319 * side-effect will be a spurious notification. in tegra_ivc_read_advance()
321 tegra_ivc_invalidate(ivc, ivc->rx.phys + tx); in tegra_ivc_read_advance()
323 if (tegra_ivc_available(ivc, ivc->rx.channel) == ivc->num_frames - 1) in tegra_ivc_read_advance()
324 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_read_advance()
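
Taken together, the read-path fragments suggest the consumer calling pattern sketched below. This is a hedged usage sketch, not code from the driver: tegra_ivc_read_get_next_frame() and tegra_ivc_read_advance() are the functions shown above, but consume_one() and the error handling around them are illustrative, and exact signatures can differ between kernel versions.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>
#include <soc/tegra/ivc.h>

/* Hedged consumer sketch: peek at the oldest pending frame, copy it out,
 * then release the frame back to the producer. */
static int consume_one(struct tegra_ivc *ivc, void *buf, size_t len)
{
	void *frame;
	int err;

	frame = tegra_ivc_read_get_next_frame(ivc);
	if (IS_ERR(frame))
		return PTR_ERR(frame);		/* e.g. -ENOSPC while the queue is empty */

	memcpy(buf, frame, len);		/* len assumed to fit within frame_size */

	err = tegra_ivc_read_advance(ivc);	/* frees the frame, may notify the peer */
	if (err < 0)
		return err;

	return 0;
}
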
330 /* directly poke at the next frame to be tx'ed */
339 return tegra_ivc_frame_virt(ivc, ivc->tx.channel, ivc->tx.position); in tegra_ivc_write_get_next_frame()
354 tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0, in tegra_ivc_write_advance()
355 ivc->frame_size); in tegra_ivc_write_advance()
358 * Order any possible stores to the frame before update of in tegra_ivc_write_advance()
359 * ivc->tx.position. in tegra_ivc_write_advance()
364 tegra_ivc_flush(ivc, ivc->tx.phys + tx); in tegra_ivc_write_advance()
367 * Ensure our write to ivc->tx.position occurs before our read from in tegra_ivc_write_advance()
368 * ivc->rx.position. in tegra_ivc_write_advance()
373 * Notify only upon transition from empty to non-empty. The available in tegra_ivc_write_advance()
375 * side-effect will be a spurious notification. in tegra_ivc_write_advance()
377 tegra_ivc_invalidate(ivc, ivc->tx.phys + rx); in tegra_ivc_write_advance()
379 if (tegra_ivc_available(ivc, ivc->tx.channel) == 1) in tegra_ivc_write_advance()
380 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_write_advance()
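
The write path mirrors the read path, which suggests a producer-side sketch along the same lines. Again this is a hedged usage example, not driver code: tegra_ivc_write_get_next_frame() and tegra_ivc_write_advance() are the functions shown above, while produce_one() and the memcpy length handling are illustrative.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>
#include <soc/tegra/ivc.h>

/* Hedged producer sketch: grab the next free frame, fill it, then publish
 * it to the consumer. */
static int produce_one(struct tegra_ivc *ivc, const void *buf, size_t len)
{
	void *frame;
	int err;

	frame = tegra_ivc_write_get_next_frame(ivc);
	if (IS_ERR(frame))
		return PTR_ERR(frame);		/* e.g. -ENOSPC while the queue is full */

	memcpy(frame, buf, len);		/* len assumed to fit within frame_size */

	err = tegra_ivc_write_advance(ivc);	/* publishes the frame, may notify the peer */
	if (err < 0)
		return err;

	return 0;
}
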
390 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_SYNC; in tegra_ivc_reset()
391 tegra_ivc_flush(ivc, ivc->tx.phys + offset); in tegra_ivc_reset()
392 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_reset()
398 * IVC State Transition Table - see tegra_ivc_notified()
402 * ----- ------ -----------------------------------
422 tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); in tegra_ivc_notified()
423 state = READ_ONCE(ivc->rx.channel->tx.state); in tegra_ivc_notified()
439 ivc->tx.channel->tx.count = 0; in tegra_ivc_notified()
440 ivc->rx.channel->rx.count = 0; in tegra_ivc_notified()
442 ivc->tx.position = 0; in tegra_ivc_notified()
443 ivc->rx.position = 0; in tegra_ivc_notified()
455 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ACK; in tegra_ivc_notified()
456 tegra_ivc_flush(ivc, ivc->tx.phys + offset); in tegra_ivc_notified()
461 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_notified()
463 } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_SYNC && in tegra_ivc_notified()
478 ivc->tx.channel->tx.count = 0; in tegra_ivc_notified()
479 ivc->rx.channel->rx.count = 0; in tegra_ivc_notified()
481 ivc->tx.position = 0; in tegra_ivc_notified()
482 ivc->rx.position = 0; in tegra_ivc_notified()
495 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED; in tegra_ivc_notified()
496 tegra_ivc_flush(ivc, ivc->tx.phys + offset); in tegra_ivc_notified()
501 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_notified()
503 } else if (ivc->tx.channel->tx.state == TEGRA_IVC_STATE_ACK) { in tegra_ivc_notified()
519 ivc->tx.channel->tx.state = TEGRA_IVC_STATE_ESTABLISHED; in tegra_ivc_notified()
520 tegra_ivc_flush(ivc, ivc->tx.phys + offset); in tegra_ivc_notified()
525 ivc->notify(ivc, ivc->notify_data); in tegra_ivc_notified()
536 if (ivc->tx.channel->tx.state != TEGRA_IVC_STATE_ESTABLISHED) in tegra_ivc_notified()
537 return -EAGAIN; in tegra_ivc_notified()
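
The reset and notified fragments above describe a SYNC -> ACK -> ESTABLISHED handshake. Below is a hedged sketch of the client-side calling convention, assuming tegra_ivc_reset() kicks off the handshake and tegra_ivc_notified() is called from the notification handler until it stops returning -EAGAIN; start_channel() and on_peer_notification() are illustrative names.

#include <linux/errno.h>
#include <soc/tegra/ivc.h>

/* Hedged handshake sketch: one side resets, then each side keeps calling
 * tegra_ivc_notified() from its notification handler until the channel
 * reports ESTABLISHED by returning 0. */
static void start_channel(struct tegra_ivc *ivc)
{
	tegra_ivc_reset(ivc);		/* local end enters SYNC and notifies the peer */
}

static void on_peer_notification(struct tegra_ivc *ivc)
{
	if (tegra_ivc_notified(ivc) == -EAGAIN)
		return;			/* handshake still in progress */

	/* ESTABLISHED: reads and writes may now proceed */
}
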
552 pr_err("%s: queue_size (%u) must be %u-byte aligned\n", in tegra_ivc_total_queue_size()
573 return -EINVAL; in tegra_ivc_check_params()
577 pr_err("frame size not adequately aligned: %zu\n", frame_size); in tegra_ivc_check_params()
578 return -EINVAL; in tegra_ivc_check_params()
587 return -EINVAL; in tegra_ivc_check_params()
592 return -EINVAL; in tegra_ivc_check_params()
599 return -EINVAL; in tegra_ivc_check_params()
605 return -EINVAL; in tegra_ivc_check_params()
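
The parameter checks above, together with tegra_ivc_total_queue_size() and the cleanup path at the end of this listing, imply a simple sizing rule: each direction needs num_frames * frame_size bytes of frame storage plus room for the shared header, and both frame_size and the resulting queue size must be cache-line aligned. A worked example with assumed numbers (the 64-byte alignment and 128-byte header are illustrative):

#include <stdio.h>
#include <stddef.h>

/* Worked example of the sizing implied by the checks above:
 * total per direction = num_frames * frame_size + header size. */
int main(void)
{
	size_t align = 64;		/* assumed TEGRA_IVC_ALIGN */
	size_t header_size = 128;	/* assumed: two cache-line halves */
	size_t frame_size = 256;	/* must be a multiple of align */
	size_t num_frames = 16;

	size_t queue_size = num_frames * frame_size;	/* 4096 */
	size_t total = queue_size + header_size;	/* 4224 */

	printf("queue aligned: %s, total per direction: %zu bytes\n",
	       (queue_size % align == 0) ? "yes" : "no", total);
	return 0;
}
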
622 return -EINVAL; in tegra_ivc_init()
629 return -E2BIG; in tegra_ivc_init()
639 ivc->rx.phys = dma_map_single(peer, rx, queue_size, in tegra_ivc_init()
641 if (dma_mapping_error(peer, ivc->rx.phys)) in tegra_ivc_init()
642 return -ENOMEM; in tegra_ivc_init()
644 ivc->tx.phys = dma_map_single(peer, tx, queue_size, in tegra_ivc_init()
646 if (dma_mapping_error(peer, ivc->tx.phys)) { in tegra_ivc_init()
647 dma_unmap_single(peer, ivc->rx.phys, queue_size, in tegra_ivc_init()
649 return -ENOMEM; in tegra_ivc_init()
652 ivc->rx.phys = rx_phys; in tegra_ivc_init()
653 ivc->tx.phys = tx_phys; in tegra_ivc_init()
656 ivc->rx.channel = rx; in tegra_ivc_init()
657 ivc->tx.channel = tx; in tegra_ivc_init()
658 ivc->peer = peer; in tegra_ivc_init()
659 ivc->notify = notify; in tegra_ivc_init()
660 ivc->notify_data = data; in tegra_ivc_init()
661 ivc->frame_size = frame_size; in tegra_ivc_init()
662 ivc->num_frames = num_frames; in tegra_ivc_init()
668 ivc->tx.position = 0; in tegra_ivc_init()
669 ivc->rx.position = 0; in tegra_ivc_init()
677 if (ivc->peer) { in tegra_ivc_cleanup()
678 size_t size = tegra_ivc_total_queue_size(ivc->num_frames * in tegra_ivc_cleanup()
679 ivc->frame_size); in tegra_ivc_cleanup()
681 dma_unmap_single(ivc->peer, ivc->rx.phys, size, in tegra_ivc_cleanup()
683 dma_unmap_single(ivc->peer, ivc->tx.phys, size, in tegra_ivc_cleanup()