Lines matching full word "fifo"

26  * gve_tx_fifo_* manages the Registered Segment as a FIFO - clients must
30 static int gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_fifo *fifo) in gve_tx_fifo_init() argument
32 fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP, in gve_tx_fifo_init()
34 if (unlikely(!fifo->base)) { in gve_tx_fifo_init()
35 netif_err(priv, drv, priv->dev, "Failed to vmap fifo, qpl_id = %d\n", in gve_tx_fifo_init()
36 fifo->qpl->id); in gve_tx_fifo_init()
40 fifo->size = fifo->qpl->num_entries * PAGE_SIZE; in gve_tx_fifo_init()
41 atomic_set(&fifo->available, fifo->size); in gve_tx_fifo_init()
42 fifo->head = 0; in gve_tx_fifo_init()
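
The init hits above fit together as follows. This is a minimal sketch, not the file itself: the struct field types, the PAGE_KERNEL argument to vmap() and the -ENOMEM return are assumptions the matched lines do not show.

/* FIFO state implied by the hits: a vmapped view of the queue page list
 * (QPL), its total size, a head offset, and an atomic count of free bytes.
 */
struct gve_tx_fifo_sketch {
	void *base;			/* vmapped base of the registered segment */
	u32 size;			/* qpl->num_entries * PAGE_SIZE */
	atomic_t available;		/* bytes not currently handed out */
	u32 head;			/* next offset to allocate from */
	struct gve_queue_page_list *qpl;	/* backing pages */
};

static int gve_tx_fifo_init_sketch(struct gve_priv *priv,
				   struct gve_tx_fifo_sketch *fifo)
{
	/* Map the QPL pages into one contiguous kernel virtual range so the
	 * FIFO can be addressed with plain byte offsets.
	 */
	fifo->base = vmap(fifo->qpl->pages, fifo->qpl->num_entries, VM_MAP,
			  PAGE_KERNEL);
	if (unlikely(!fifo->base)) {
		netif_err(priv, drv, priv->dev,
			  "Failed to vmap fifo, qpl_id = %d\n", fifo->qpl->id);
		return -ENOMEM;	/* assumed error code, not in the hits */
	}

	fifo->size = fifo->qpl->num_entries * PAGE_SIZE;
	atomic_set(&fifo->available, fifo->size);
	fifo->head = 0;
	return 0;
}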
46 static void gve_tx_fifo_release(struct gve_priv *priv, struct gve_tx_fifo *fifo) in gve_tx_fifo_release() argument
48 WARN(atomic_read(&fifo->available) != fifo->size, in gve_tx_fifo_release()
49 "Releasing non-empty fifo"); in gve_tx_fifo_release()
51 vunmap(fifo->base); in gve_tx_fifo_release()
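
The two release hits reassemble into a short teardown: warn if any allocated bytes were never returned, then drop the vmapping. A sketch, again using the assumed type above:

static void gve_tx_fifo_release_sketch(struct gve_priv *priv,
				       struct gve_tx_fifo_sketch *fifo)
{
	/* Everything handed out by the allocator should have been returned
	 * via the free path before teardown.
	 */
	WARN(atomic_read(&fifo->available) != fifo->size,
	     "Releasing non-empty fifo");

	vunmap(fifo->base);
}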
54 static int gve_tx_fifo_pad_alloc_one_frag(struct gve_tx_fifo *fifo, in gve_tx_fifo_pad_alloc_one_frag() argument
57 return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head; in gve_tx_fifo_pad_alloc_one_frag()
60 static bool gve_tx_fifo_can_alloc(struct gve_tx_fifo *fifo, size_t bytes) in gve_tx_fifo_can_alloc() argument
62 return (atomic_read(&fifo->available) <= bytes) ? false : true; in gve_tx_fifo_can_alloc()
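
The two helpers above are small but their semantics matter for the allocator below. Sketches with the behaviour spelled out in comments (types as assumed earlier; the strict comparison is rewritten but equivalent to the matched ternary):

static int gve_tx_fifo_pad_alloc_one_frag_sketch(struct gve_tx_fifo_sketch *fifo,
						 size_t bytes)
{
	/* 0 if `bytes` fits in a single fragment before the end of the FIFO;
	 * otherwise the number of bytes left until the end, i.e. the tail
	 * padding needed so a following allocation lands at offset 0.
	 */
	return (fifo->head + bytes < fifo->size) ? 0 : fifo->size - fifo->head;
}

static bool gve_tx_fifo_can_alloc_sketch(struct gve_tx_fifo_sketch *fifo,
					 size_t bytes)
{
	/* Require strictly more than `bytes` free: the slack covers the
	 * cacheline-alignment padding the allocator charges on top of the
	 * data (equivalent to the `<= ? false : true` form in the hit).
	 */
	return atomic_read(&fifo->available) > bytes;
}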
65 /* gve_tx_alloc_fifo - Allocate fragment(s) from Tx FIFO
66 * @fifo: FIFO to allocate from
72 * Allocations from a given FIFO must be externally synchronized but concurrent
75 static int gve_tx_alloc_fifo(struct gve_tx_fifo *fifo, size_t bytes, in gve_tx_alloc_fifo() argument
87 * because the FIFO head always starts aligned, and the FIFO's boundaries in gve_tx_alloc_fifo()
91 WARN(!gve_tx_fifo_can_alloc(fifo, bytes), in gve_tx_alloc_fifo()
92 "Reached %s when there's not enough space in the fifo", __func__); in gve_tx_alloc_fifo()
96 iov[0].iov_offset = fifo->head; in gve_tx_alloc_fifo()
98 fifo->head += bytes; in gve_tx_alloc_fifo()
100 if (fifo->head > fifo->size) { in gve_tx_alloc_fifo()
102 * FIFO, also use the head fragment. in gve_tx_alloc_fifo()
105 overflow = fifo->head - fifo->size; in gve_tx_alloc_fifo()
107 iov[1].iov_offset = 0; /* Start of fifo */ in gve_tx_alloc_fifo()
110 fifo->head = overflow; in gve_tx_alloc_fifo()
114 aligned_head = L1_CACHE_ALIGN(fifo->head); in gve_tx_alloc_fifo()
115 padding = aligned_head - fifo->head; in gve_tx_alloc_fifo()
117 atomic_sub(bytes + padding, &fifo->available); in gve_tx_alloc_fifo()
118 fifo->head = aligned_head; in gve_tx_alloc_fifo()
120 if (fifo->head == fifo->size) in gve_tx_alloc_fifo()
121 fifo->head = 0; in gve_tx_alloc_fifo()
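
Read together, the gve_tx_alloc_fifo() hits describe the allocation algorithm: place the request at the current head, split it into a second fragment if it runs past the end of the registered segment, then re-align the head to a cacheline and charge data plus padding against the available count. Below is a reconstruction consistent with those hits; the gve_tx_iovec field names, the iov_len/iov_padding bookkeeping and the returned fragment count are assumptions the matched lines do not show.

struct gve_tx_iovec_sketch {
	u32 iov_offset;		/* fragment offset within the FIFO */
	u32 iov_len;		/* bytes in this fragment */
	u32 iov_padding;	/* alignment padding charged after the fragment */
};

static int gve_tx_alloc_fifo_sketch(struct gve_tx_fifo_sketch *fifo, size_t bytes,
				    struct gve_tx_iovec_sketch iov[2])
{
	size_t overflow, padding;
	u32 aligned_head;
	int nfrags = 0;

	if (!bytes)
		return 0;

	/* The caller is expected to have checked for space; the alignment
	 * padding always fits because the head starts aligned and the FIFO
	 * boundaries are aligned.
	 */
	WARN(!gve_tx_fifo_can_alloc_sketch(fifo, bytes),
	     "Reached %s when there's not enough space in the fifo", __func__);

	nfrags++;
	iov[0].iov_offset = fifo->head;
	iov[0].iov_len = bytes;
	fifo->head += bytes;

	if (fifo->head > fifo->size) {
		/* The request ran past the end of the segment: trim the first
		 * fragment and wrap the remainder to offset 0.
		 */
		overflow = fifo->head - fifo->size;
		iov[0].iov_len -= overflow;
		iov[0].iov_padding = 0;	/* the next fragment is adjacent */
		iov[1].iov_offset = 0;	/* start of FIFO */
		iov[1].iov_len = overflow;
		nfrags++;
		fifo->head = overflow;
	}

	/* Keep the next allocation cacheline aligned and account for the
	 * padding along with the data bytes.
	 */
	aligned_head = L1_CACHE_ALIGN(fifo->head);
	padding = aligned_head - fifo->head;
	iov[nfrags - 1].iov_padding = padding;
	atomic_sub(bytes + padding, &fifo->available);
	fifo->head = aligned_head;

	if (fifo->head == fifo->size)
		fifo->head = 0;

	return nfrags;
}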
126 /* gve_tx_free_fifo - Return space to Tx FIFO
127 * @fifo: FIFO to return fragments to
130 static void gve_tx_free_fifo(struct gve_tx_fifo *fifo, size_t bytes) in gve_tx_free_fifo() argument
132 atomic_add(bytes, &fifo->available); in gve_tx_free_fifo()
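
Freeing is the counterpart: completions only have to return the byte count, since position is tracked solely by head on the allocation side. As a sketch:

static void gve_tx_free_fifo_sketch(struct gve_tx_fifo_sketch *fifo, size_t bytes)
{
	atomic_add(bytes, &fifo->available);
}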
201 /* map Tx FIFO */ in gve_tx_alloc_ring()
303 * 1 if the payload wraps to the beginning of the FIFO
322 /* Check if sufficient resources (descriptor ring space, FIFO space) are
467 * of the fifo and then put the header at the beginning of the fifo. in gve_tx_add_skb_copy()
683 /* FIFO free */ in gve_clean_tx_done()
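
The remaining scattered hits (ring setup, the wrap-count comment, the resource check, the header copy and the completion-time free) suggest how the FIFO is used on the transmit path. Below is a hypothetical flow using the sketched helpers above; tx_copy_skb_sketch and its parameters are illustrative names, not taken from the matched lines.

static int tx_copy_skb_sketch(struct gve_tx_fifo_sketch *fifo,
			      const void *data, size_t hlen, size_t plen)
{
	struct gve_tx_iovec_sketch hdr_iov[2], pay_iov[2];
	int pad_bytes, hdr_nfrags, pay_nfrags;

	/* If the header would straddle the wrap point, pad to the end of the
	 * fifo and then put the header at the beginning of the fifo (compare
	 * the hit from gve_tx_add_skb_copy()).
	 */
	pad_bytes = gve_tx_fifo_pad_alloc_one_frag_sketch(fifo, hlen);

	/* Check FIFO space up front; per the hit above, the driver also
	 * checks descriptor ring space at this point.
	 */
	if (!gve_tx_fifo_can_alloc_sketch(fifo, hlen + plen + pad_bytes))
		return -EBUSY;	/* assumed: caller would stop the queue */

	hdr_nfrags = gve_tx_alloc_fifo_sketch(fifo, hlen + pad_bytes, hdr_iov);
	/* The header always ends up whole in the last header fragment. */
	memcpy((char *)fifo->base + hdr_iov[hdr_nfrags - 1].iov_offset,
	       data, hlen);

	/* The payload may wrap, producing one extra fragment ("1 if the
	 * payload wraps to the beginning of the FIFO").
	 */
	pay_nfrags = gve_tx_alloc_fifo_sketch(fifo, plen, pay_iov);
	memcpy((char *)fifo->base + pay_iov[0].iov_offset,
	       (const char *)data + hlen, pay_iov[0].iov_len);
	if (pay_nfrags == 2)
		memcpy((char *)fifo->base + pay_iov[1].iov_offset,
		       (const char *)data + hlen + pay_iov[0].iov_len,
		       pay_iov[1].iov_len);

	/* On completion, the clean path returns the space with the free
	 * helper, e.g. gve_tx_free_fifo_sketch(fifo, ...) covering the data
	 * bytes plus any alignment padding charged by the allocator.
	 */
	return hdr_nfrags + pay_nfrags;
}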