Lines matching refs:self (references to the ring pointer self in the atlantic driver's aq_ring.c)

61 static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,  in aq_get_rxpages()  argument
73 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
74 self->stats.rx.pg_flips++; in aq_get_rxpages()
75 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
81 aq_nic_get_dev(self->aq_nic)); in aq_get_rxpages()
82 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
83 self->stats.rx.pg_losts++; in aq_get_rxpages()
84 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
88 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
89 self->stats.rx.pg_reuses++; in aq_get_rxpages()
90 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
96 aq_nic_get_dev(self->aq_nic)); in aq_get_rxpages()
98 u64_stats_update_begin(&self->stats.rx.syncp); in aq_get_rxpages()
99 self->stats.rx.alloc_fails++; in aq_get_rxpages()
100 u64_stats_update_end(&self->stats.rx.syncp); in aq_get_rxpages()
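Each branch of aq_get_rxpages() above (pg_flips, pg_losts, pg_reuses, alloc_fails) follows the same writer-side rule: every 64-bit per-ring counter bump is bracketed by u64_stats_update_begin()/u64_stats_update_end() on the ring's Rx syncp so readers on 32-bit hosts never see torn values. A minimal sketch of that pattern, with a trimmed stand-in stats struct and a made-up helper name (the driver open-codes this at each site):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Trimmed stand-in for the driver's Rx ring statistics. */
struct rx_ring_stats {
        struct u64_stats_sync syncp;
        u64 pg_flips;
        u64 pg_reuses;
        u64 pg_losts;
        u64 alloc_fails;
};

/* Hypothetical helper showing the writer-side bracket. */
static void rx_stats_count_pg_flip(struct rx_ring_stats *stats)
{
        u64_stats_update_begin(&stats->syncp);
        stats->pg_flips++;
        u64_stats_update_end(&stats->syncp);
}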
108 static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, in aq_ring_alloc() argument
113 self->buff_ring = in aq_ring_alloc()
114 kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL); in aq_ring_alloc()
116 if (!self->buff_ring) { in aq_ring_alloc()
120 self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), in aq_ring_alloc()
121 self->size * self->dx_size, in aq_ring_alloc()
122 &self->dx_ring_pa, GFP_KERNEL); in aq_ring_alloc()
123 if (!self->dx_ring) { in aq_ring_alloc()
130 aq_ring_free(self); in aq_ring_alloc()
131 self = NULL; in aq_ring_alloc()
134 return self; in aq_ring_alloc()
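aq_ring_alloc() pairs a kcalloc()'d array of software buffer descriptors (buff_ring) with a dma_alloc_coherent() region of size * dx_size bytes for the hardware descriptors (dx_ring); on failure it tears down through aq_ring_free() and returns NULL. A hedged sketch of the same two-part allocation, using placeholder struct and function names rather than the driver's layout:

#include <linux/slab.h>
#include <linux/dma-mapping.h>

struct my_ring {
        unsigned int size;      /* number of descriptors */
        unsigned int dx_size;   /* bytes per hardware descriptor */
        void *buff_ring;        /* software shadow entries */
        void *dx_ring;          /* hardware descriptor memory */
        dma_addr_t dx_ring_pa;  /* DMA address programmed into the NIC */
};

static int my_ring_alloc(struct my_ring *ring, struct device *dev,
                         size_t buff_entry_size)
{
        ring->buff_ring = kcalloc(ring->size, buff_entry_size, GFP_KERNEL);
        if (!ring->buff_ring)
                return -ENOMEM;

        ring->dx_ring = dma_alloc_coherent(dev, ring->size * ring->dx_size,
                                           &ring->dx_ring_pa, GFP_KERNEL);
        if (!ring->dx_ring) {
                kfree(ring->buff_ring);
                ring->buff_ring = NULL;
                return -ENOMEM;
        }
        return 0;
}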
137 struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self, in aq_ring_tx_alloc() argument
144 self->aq_nic = aq_nic; in aq_ring_tx_alloc()
145 self->idx = idx; in aq_ring_tx_alloc()
146 self->size = aq_nic_cfg->txds; in aq_ring_tx_alloc()
147 self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size; in aq_ring_tx_alloc()
149 self = aq_ring_alloc(self, aq_nic); in aq_ring_tx_alloc()
150 if (!self) { in aq_ring_tx_alloc()
157 aq_ring_free(self); in aq_ring_tx_alloc()
158 self = NULL; in aq_ring_tx_alloc()
161 return self; in aq_ring_tx_alloc()
164 struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, in aq_ring_rx_alloc() argument
171 self->aq_nic = aq_nic; in aq_ring_rx_alloc()
172 self->idx = idx; in aq_ring_rx_alloc()
173 self->size = aq_nic_cfg->rxds; in aq_ring_rx_alloc()
174 self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; in aq_ring_rx_alloc()
175 self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + in aq_ring_rx_alloc()
178 if (aq_nic_cfg->rxpageorder > self->page_order) in aq_ring_rx_alloc()
179 self->page_order = aq_nic_cfg->rxpageorder; in aq_ring_rx_alloc()
181 self = aq_ring_alloc(self, aq_nic); in aq_ring_rx_alloc()
182 if (!self) { in aq_ring_rx_alloc()
189 aq_ring_free(self); in aq_ring_rx_alloc()
190 self = NULL; in aq_ring_rx_alloc()
193 return self; in aq_ring_rx_alloc()
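The Rx variant additionally chooses a page order: line 175 derives it from AQ_CFG_RX_FRAME_MAX and PAGE_SIZE via fls() (the continuation of that line is not shown in this listing), and lines 178-179 let the configured rxpageorder raise it. A sketch of that intent, assuming a simple round-up to whole pages; the exact rounding in the elided continuation may differ:

#include <linux/kernel.h>   /* DIV_ROUND_UP(), max() */
#include <linux/bitops.h>   /* fls() */
#include <asm/page.h>       /* PAGE_SIZE */

/* Smallest page order covering frame_max bytes, not below cfg_order. */
static unsigned int rx_page_order(unsigned int frame_max,
                                  unsigned int cfg_order)
{
        unsigned int pages = DIV_ROUND_UP(frame_max, PAGE_SIZE);
        unsigned int order = pages > 1 ? fls(pages - 1) : 0;

        return max(order, cfg_order);
}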
197 aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, in aq_ring_hwts_rx_alloc() argument
203 memset(self, 0, sizeof(*self)); in aq_ring_hwts_rx_alloc()
205 self->aq_nic = aq_nic; in aq_ring_hwts_rx_alloc()
206 self->idx = idx; in aq_ring_hwts_rx_alloc()
207 self->size = size; in aq_ring_hwts_rx_alloc()
208 self->dx_size = dx_size; in aq_ring_hwts_rx_alloc()
210 self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa, in aq_ring_hwts_rx_alloc()
212 if (!self->dx_ring) { in aq_ring_hwts_rx_alloc()
213 aq_ring_free(self); in aq_ring_hwts_rx_alloc()
217 return self; in aq_ring_hwts_rx_alloc()
220 int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type) in aq_ring_init() argument
222 self->hw_head = 0; in aq_ring_init()
223 self->sw_head = 0; in aq_ring_init()
224 self->sw_tail = 0; in aq_ring_init()
225 self->ring_type = ring_type; in aq_ring_init()
227 if (self->ring_type == ATL_RING_RX) in aq_ring_init()
228 u64_stats_init(&self->stats.rx.syncp); in aq_ring_init()
230 u64_stats_init(&self->stats.tx.syncp); in aq_ring_init()
275 bool aq_ring_tx_clean(struct aq_ring_s *self) in aq_ring_tx_clean() argument
277 struct device *dev = aq_nic_get_dev(self->aq_nic); in aq_ring_tx_clean()
281 budget && self->sw_head != self->hw_head; budget--) { in aq_ring_tx_clean()
282 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in aq_ring_tx_clean()
288 (!aq_ring_dx_in_range(self->sw_head, in aq_ring_tx_clean()
290 self->hw_head))) in aq_ring_tx_clean()
302 u64_stats_update_begin(&self->stats.tx.syncp); in aq_ring_tx_clean()
303 ++self->stats.tx.packets; in aq_ring_tx_clean()
304 self->stats.tx.bytes += buff->skb->len; in aq_ring_tx_clean()
305 u64_stats_update_end(&self->stats.tx.syncp); in aq_ring_tx_clean()
311 self->sw_head = aq_ring_next_dx(self, self->sw_head); in aq_ring_tx_clean()
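aq_ring_tx_clean() walks completed descriptors from sw_head toward hw_head under a budget and advances sw_head with aq_ring_next_dx(); aq_ring_avail_dx() (used later by the Rx refill path) reports how many descriptors are free between sw_tail and sw_head. Neither helper appears in this listing, so the following index arithmetic is an assumption about their usual shape, not code copied from the driver:

/* Next descriptor index, wrapping at the ring size. */
static unsigned int ring_next_dx(unsigned int dx, unsigned int size)
{
        return (dx + 1 < size) ? dx + 1 : 0;
}

/* Free descriptors, keeping one slot empty to tell full from empty. */
static unsigned int ring_avail_dx(unsigned int sw_head, unsigned int sw_tail,
                                  unsigned int size)
{
        return size - 1 - ((sw_tail - sw_head + size) % size);
}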
317 static void aq_rx_checksum(struct aq_ring_s *self, in aq_rx_checksum() argument
321 if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM)) in aq_rx_checksum()
325 u64_stats_update_begin(&self->stats.rx.syncp); in aq_rx_checksum()
326 ++self->stats.rx.errors; in aq_rx_checksum()
327 u64_stats_update_end(&self->stats.rx.syncp); in aq_rx_checksum()
342 int aq_ring_rx_clean(struct aq_ring_s *self, in aq_ring_rx_clean() argument
347 struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); in aq_ring_rx_clean()
351 for (; (self->sw_head != self->hw_head) && budget; in aq_ring_rx_clean()
352 self->sw_head = aq_ring_next_dx(self, self->sw_head), in aq_ring_rx_clean()
354 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in aq_ring_rx_clean()
355 bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self); in aq_ring_rx_clean()
369 buff_ = &self->buff_ring[next_]; in aq_ring_rx_clean()
371 aq_ring_dx_in_range(self->sw_head, in aq_ring_rx_clean()
373 self->hw_head); in aq_ring_rx_clean()
392 buff_ = &self->buff_ring[next_]; in aq_ring_rx_clean()
397 u64_stats_update_begin(&self->stats.rx.syncp); in aq_ring_rx_clean()
398 ++self->stats.rx.errors; in aq_ring_rx_clean()
399 u64_stats_update_end(&self->stats.rx.syncp); in aq_ring_rx_clean()
405 u64_stats_update_begin(&self->stats.rx.syncp); in aq_ring_rx_clean()
406 ++self->stats.rx.errors; in aq_ring_rx_clean()
407 u64_stats_update_end(&self->stats.rx.syncp); in aq_ring_rx_clean()
411 dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), in aq_ring_rx_clean()
418 u64_stats_update_begin(&self->stats.rx.syncp); in aq_ring_rx_clean()
419 self->stats.rx.skb_alloc_fails++; in aq_ring_rx_clean()
420 u64_stats_update_end(&self->stats.rx.syncp); in aq_ring_rx_clean()
426 aq_ptp_extract_ts(self->aq_nic, skb, in aq_ring_rx_clean()
452 buff_ = &self->buff_ring[next_]; in aq_ring_rx_clean()
454 dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), in aq_ring_rx_clean()
481 aq_rx_checksum(self, buff, skb); in aq_ring_rx_clean()
489 : AQ_NIC_RING2QMAP(self->aq_nic, in aq_ring_rx_clean()
490 self->idx)); in aq_ring_rx_clean()
492 u64_stats_update_begin(&self->stats.rx.syncp); in aq_ring_rx_clean()
493 ++self->stats.rx.packets; in aq_ring_rx_clean()
494 self->stats.rx.bytes += skb->len; in aq_ring_rx_clean()
495 u64_stats_update_end(&self->stats.rx.syncp); in aq_ring_rx_clean()
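Within aq_ring_rx_clean(), descriptor and checksum errors bump stats.rx.errors, failed skb allocation is counted as skb_alloc_fails, and completed frames are counted in packets/bytes after aq_rx_checksum() annotates them. The other recurring rule, visible at lines 411 and 454, is that the streaming DMA mapping is synced for CPU access before the packet bytes written by the NIC are read. A small sketch of that sync step, with placeholder names:

#include <linux/dma-mapping.h>

/* Make the NIC-written region CPU-visible before parsing it. */
static void rx_sync_for_cpu(struct device *dev, dma_addr_t page_daddr,
                            unsigned int offset, unsigned int len)
{
        dma_sync_single_range_for_cpu(dev, page_daddr, offset, len,
                                      DMA_FROM_DEVICE);
}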
504 void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) in aq_ring_hwts_rx_clean() argument
507 while (self->sw_head != self->hw_head) { in aq_ring_hwts_rx_clean()
511 self->dx_ring + in aq_ring_hwts_rx_clean()
512 (self->sw_head * self->dx_size), in aq_ring_hwts_rx_clean()
513 self->dx_size, &ns); in aq_ring_hwts_rx_clean()
516 self->sw_head = aq_ring_next_dx(self, self->sw_head); in aq_ring_hwts_rx_clean()
521 int aq_ring_rx_fill(struct aq_ring_s *self) in aq_ring_rx_fill() argument
523 unsigned int page_order = self->page_order; in aq_ring_rx_fill()
528 if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES, in aq_ring_rx_fill()
529 self->size / 2)) in aq_ring_rx_fill()
532 for (i = aq_ring_avail_dx(self); i--; in aq_ring_rx_fill()
533 self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { in aq_ring_rx_fill()
534 buff = &self->buff_ring[self->sw_tail]; in aq_ring_rx_fill()
539 err = aq_get_rxpages(self, buff, page_order); in aq_ring_rx_fill()
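The comparison at lines 528-529 gates the refill pass on the number of free descriptors relative to min(AQ_CFG_RX_REFILL_THRES, size / 2); the branch body is elided in this listing. A sketch of just that comparison, with a placeholder threshold value standing in for AQ_CFG_RX_REFILL_THRES:

#include <linux/types.h>
#include <linux/kernel.h>   /* min() */

#define RX_REFILL_THRES 32U /* placeholder for AQ_CFG_RX_REFILL_THRES */

/* True when fewer than min(threshold, half the ring) slots are free. */
static bool rx_below_refill_batch(unsigned int avail, unsigned int ring_size)
{
        return avail < min(RX_REFILL_THRES, ring_size / 2U);
}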
551 void aq_ring_rx_deinit(struct aq_ring_s *self) in aq_ring_rx_deinit() argument
553 if (!self) in aq_ring_rx_deinit()
556 for (; self->sw_head != self->sw_tail; in aq_ring_rx_deinit()
557 self->sw_head = aq_ring_next_dx(self, self->sw_head)) { in aq_ring_rx_deinit()
558 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; in aq_ring_rx_deinit()
560 aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic)); in aq_ring_rx_deinit()
564 void aq_ring_free(struct aq_ring_s *self) in aq_ring_free() argument
566 if (!self) in aq_ring_free()
569 kfree(self->buff_ring); in aq_ring_free()
571 if (self->dx_ring) in aq_ring_free()
572 dma_free_coherent(aq_nic_get_dev(self->aq_nic), in aq_ring_free()
573 self->size * self->dx_size, self->dx_ring, in aq_ring_free()
574 self->dx_ring_pa); in aq_ring_free()
577 unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) in aq_ring_fill_stats_data() argument
582 if (self->ring_type == ATL_RING_RX) { in aq_ring_fill_stats_data()
586 start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp); in aq_ring_fill_stats_data()
587 data[count] = self->stats.rx.packets; in aq_ring_fill_stats_data()
588 data[++count] = self->stats.rx.jumbo_packets; in aq_ring_fill_stats_data()
589 data[++count] = self->stats.rx.lro_packets; in aq_ring_fill_stats_data()
590 data[++count] = self->stats.rx.errors; in aq_ring_fill_stats_data()
591 data[++count] = self->stats.rx.alloc_fails; in aq_ring_fill_stats_data()
592 data[++count] = self->stats.rx.skb_alloc_fails; in aq_ring_fill_stats_data()
593 data[++count] = self->stats.rx.polls; in aq_ring_fill_stats_data()
594 } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); in aq_ring_fill_stats_data()
599 start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp); in aq_ring_fill_stats_data()
600 data[count] = self->stats.tx.packets; in aq_ring_fill_stats_data()
601 data[++count] = self->stats.tx.queue_restarts; in aq_ring_fill_stats_data()
602 } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start)); in aq_ring_fill_stats_data()
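aq_ring_fill_stats_data() is the reader side of the syncp pattern: the snapshot of each counter group is taken inside a u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop and retried if a writer ran in between. A minimal sketch with a trimmed placeholder counter struct:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct ring_counters {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 errors;
};

/* Retry the snapshot until no writer interleaved with the reads. */
static void ring_counters_snapshot(const struct ring_counters *c,
                                   u64 *packets, u64 *errors)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&c->syncp);
                *packets = c->packets;
                *errors = c->errors;
        } while (u64_stats_fetch_retry_irq(&c->syncp, start));
}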