// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2015 Solarflare Communications Inc.
 */

#define PTR_DIFF(p1, p2)  ((u8 *)(p1) - (u8 *)(p2))

/**
 * struct tso_state - TSO state for an SKB
 * ...
 * @dma_addr: DMA address of current position
 * ...
 * @unmap_addr: DMA address of SKB fragment
 * ...
 * @header_dma_addr: Header DMA address
 * @header_unmap_len: Header DMA mapped length
 */
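
/* The struct body itself is not part of this extract.  A sketch inferred
 * from the field accesses in the functions below; field types and order
 * are assumptions, not verbatim source:
 *
 *	struct tso_state {
 *		unsigned int out_len;
 *		unsigned int seqnum;
 *		u16 ipv4_id;
 *		unsigned int packet_space;
 *		dma_addr_t dma_addr;
 *		unsigned int in_len;
 *		unsigned int unmap_len;
 *		dma_addr_t unmap_addr;
 *		__be16 protocol;
 *		unsigned int ip_off;
 *		unsigned int tcp_off;
 *		unsigned int header_len;
 *		unsigned int ip_base_len;
 *		dma_addr_t header_dma_addr;
 *		unsigned int header_unmap_len;
 *	};
 */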

/* from prefetch_ptr(): */
	ptr = (char *) (tx_queue->buffer + insert_ptr);
	prefetch(ptr);
	prefetch(ptr + 0x80);

	ptr = (char *) (((efx_qword_t *)tx_queue->txd.buf.addr) + insert_ptr);
	prefetch(ptr);
	prefetch(ptr + 0x80);
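
/* The 0x80 offset prefetches the cache line 128 bytes ahead of each ring
 * entry (two lines on a typical 64-byte-cache-line system), so both the
 * software buffer ring and the hardware descriptor ring are warm before
 * efx_tx_queue_insert() writes them.
 */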

/**
 * efx_tx_queue_insert - push descriptors onto the TX queue
 * ...
 * @dma_addr: DMA address of fragment
 * ...
 */

/* from efx_tx_queue_insert(), one iteration per descriptor: */
	++tx_queue->insert_count;

	EFX_WARN_ON_ONCE_PARANOID(tx_queue->insert_count -
				  tx_queue->read_count >=
				  tx_queue->efx->txq_entries);

	buffer->dma_addr = dma_addr;

	dma_len = tx_queue->efx->type->tx_limit_len(tx_queue,
						    dma_addr, len);
	/* fragment larger than one descriptor allows: */
	buffer->len = dma_len;
	buffer->flags = EFX_TX_BUF_CONT;
	/* ... */
	len -= dma_len;
	/* on loop exit, the last descriptor takes the remainder: */
	buffer->len = len;
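
/* Worked example with illustrative numbers: a 10000-byte fragment and a
 * hypothetical tx_limit_len() of 4096 yields three descriptors of 4096,
 * 4096 and 1808 bytes; the first two stay in the loop with
 * EFX_TX_BUF_CONT set, and the final descriptor takes buffer->len = len.
 */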

/* from efx_tso_check_protocol(): */
	__be16 protocol = skb->protocol;

	EFX_WARN_ON_ONCE_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
				  protocol);
	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

		protocol = veh->h_vlan_encapsulated_proto;
	}
	/* IPv4 case: */
	EFX_WARN_ON_ONCE_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP);
	/* IPv6 case: */
	EFX_WARN_ON_ONCE_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP);
	EFX_WARN_ON_ONCE_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) +
				   (tcp_hdr(skb)->doff << 2u)) >
				  skb_headlen(skb));
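
/* Every check above is EFX_WARN_ON_ONCE_PARANOID(), which by its name
 * appears to be a debug-only assertion; the function's observable job is
 * simply to return the L3 protocol with any single VLAN tag skipped.
 */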

/* from tso_start(): */
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int header_len, in_len;
	dma_addr_t dma_addr;

	st->ip_off = skb_network_header(skb) - skb->data;
	st->tcp_off = skb_transport_header(skb) - skb->data;
	header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
	in_len = skb_headlen(skb) - header_len;
	st->header_len = header_len;
	st->in_len = in_len;
	if (st->protocol == htons(ETH_P_IP)) {
		st->ip_base_len = st->header_len - st->ip_off;
		st->ipv4_id = ntohs(ip_hdr(skb)->id);
	} else {
		st->ip_base_len = st->header_len - st->tcp_off;
		st->ipv4_id = 0;
	}
	st->seqnum = ntohl(tcp_hdr(skb)->seq);

	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->urg);
	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->syn);
	EFX_WARN_ON_ONCE_PARANOID(tcp_hdr(skb)->rst);

	st->out_len = skb->len - header_len;

	dma_addr = dma_map_single(dma_dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	st->header_dma_addr = dma_addr;
	st->header_unmap_len = skb_headlen(skb);
	st->dma_addr = dma_addr + header_len;
	st->unmap_len = 0;

	return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
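
/* Illustrative arithmetic for the offsets above: for an IPv4 TCP skb with
 * a 14-byte Ethernet header, 20-byte IP header and 20-byte TCP header,
 * ip_off = 14, tcp_off = 34 and header_len = 54, so st->dma_addr ends up
 * pointing at the first payload byte of the linear area while
 * header_dma_addr/header_unmap_len cover the whole mapped head.
 */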

/* from tso_get_fragment(): */
	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
					  skb_frag_size(frag), DMA_TO_DEVICE);
	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
		st->unmap_len = skb_frag_size(frag);
		st->in_len = skb_frag_size(frag);
		st->dma_addr = st->unmap_addr;
		return 0;
	}
	return -ENOMEM;
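
/* On success the mapping is recorded in st->unmap_addr/st->unmap_len so
 * that either tso_fill_packet_with_fragment() below can hand ownership to
 * a TX buffer, or the fail path in efx_enqueue_skb_tso() can unmap it.
 */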

/**
 * tso_fill_packet_with_fragment - form descriptors for the current fragment
 * ...
 * Form descriptors for the current fragment, until we reach the end
 * of fragment or end-of-packet.
 */

/* from tso_fill_packet_with_fragment(): */
	if (st->in_len == 0)
		return;
	if (st->packet_space == 0)
		return;

	EFX_WARN_ON_ONCE_PARANOID(st->in_len <= 0);
	EFX_WARN_ON_ONCE_PARANOID(st->packet_space <= 0);

	n = min(st->in_len, st->packet_space);

	st->packet_space -= n;
	st->out_len -= n;
	st->in_len -= n;

	efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);

	if (st->out_len == 0) {
		/* Transfer ownership of the skb */
		buffer->skb = skb;
		buffer->flags = EFX_TX_BUF_SKB;
	} else if (st->packet_space != 0) {
		buffer->flags = EFX_TX_BUF_CONT;
	}

	if (st->in_len == 0) {
		/* Transfer ownership of the DMA mapping */
		buffer->unmap_len = st->unmap_len;
		buffer->dma_offset = buffer->unmap_len - buffer->len;
		st->unmap_len = 0;
	}

	st->dma_addr += n;
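
/* Note on the ownership transfers above: only the descriptor that
 * completes the skb carries EFX_TX_BUF_SKB, and only the one that
 * exhausts a fragment carries the unmap information, so on TX completion
 * the skb is freed once and each DMA mapping is unmapped once.
 */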

/**
 * tso_start_new_packet - generate a new header and prepare for the new packet
 * ...
 * Generate a new header and prepare for the new packet.  Return 0 on
 * success, -%ENOMEM if the header could not be allocated, or another
 * negative error code.
 */

/* from tso_start_new_packet(): */
	bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;

	if (!is_last) {
		st->packet_space = skb_shinfo(skb)->gso_size;
		/* ... */
	} else {
		st->packet_space = st->out_len;
		/* ... */
	}

	if (WARN_ON(!st->header_unmap_len))
		return -EINVAL;

	/* TSO option descriptor in front of the header: */
	buffer->flags = EFX_TX_BUF_OPTION;
	buffer->len = 0;
	buffer->unmap_len = 0;
	EFX_POPULATE_QWORD_5(buffer->option,
			     /* ... three option fields elided ... */
			     ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
			     ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
	++tx_queue->insert_count;

	/* ... fetch the next insert buffer for the header DMA descriptor ... */
	buffer->dma_addr = st->header_dma_addr;
	buffer->len = st->header_len;
	if (is_last) {
		buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
		buffer->unmap_len = st->header_unmap_len;
		buffer->dma_offset = 0;
		/* Ensure we only unmap the header once in case of a
		 * later DMA mapping error and rollback
		 */
		st->header_unmap_len = 0;
	} else {
		buffer->flags = EFX_TX_BUF_CONT;
		buffer->unmap_len = 0;
	}
	++tx_queue->insert_count;

	st->seqnum += skb_shinfo(skb)->gso_size;
	++st->ipv4_id;
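
/* Each new segment advances the TCP sequence number by gso_size and bumps
 * the IPv4 ID by one; the GSO layer leaves matching gaps in the ID space
 * of the original skb, so the segments look as if they had been sent
 * individually.
 */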

/**
 * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer
 * ...
 */

/* from efx_enqueue_skb_tso(): */
	struct efx_nic *efx = tx_queue->efx;
	int frag_i, rc;
	struct tso_state state;

	if (tx_queue->tso_version != 1)
		return -EINVAL;

	prefetch(skb->data);

	/* Find the packet protocol and sanity-check it */
	state.protocol = efx_tso_check_protocol(skb);

	EFX_WARN_ON_ONCE_PARANOID(tx_queue->write_count != tx_queue->insert_count);

	/* ... payload starts in a page fragment: */
	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->nr_frags < 1);
	frag_i = 0;
	rc = tso_get_fragment(&state, efx,
			      skb_shinfo(skb)->frags + frag_i);
	/* ... payload starts in the header area: */
	frag_i = -1;

	/* ... in the main segmentation loop, move to the next fragment: */
	if (++frag_i >= skb_shinfo(skb)->nr_frags)
		break;	/* end of payload reached */
	rc = tso_get_fragment(&state, efx,
			      skb_shinfo(skb)->frags + frag_i);

	/* ... error unwind path: */
	if (rc == -ENOMEM)
		netif_err(efx, tx_err, efx->net_dev,
			  "Out of memory for TSO headers, or DMA mapping error\n");
	else
		netif_err(efx, tx_err, efx->net_dev, "TSO failed, rc = %d\n", rc);

	/* Free the DMA mapping we were in the process of writing out */
	if (state.unmap_len)
		dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
			       state.unmap_len, DMA_TO_DEVICE);

	/* Free the header DMA mapping */
	if (state.header_unmap_len)
		dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
				 state.header_unmap_len, DMA_TO_DEVICE);
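
/* Hedged usage sketch (not verbatim driver code): the transmit path in
 * the driver's tx.c would dispatch GSO skbs here when the queue reports
 * TSO version 1, handling -EINVAL by falling back to software
 * segmentation:
 *
 *	if (skb_is_gso(skb) && tx_queue->tso_version == 1) {
 *		bool data_mapped = false;
 *
 *		rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
 *		...
 *	}
 */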