Searched refs:TX_RING_SIZE (Results 1 – 25 of 49) sorted by relevance

/Linux-v4.19/drivers/net/ethernet/sun/
sungem.h 883 #define TX_RING_SIZE 128 macro
886 #if TX_RING_SIZE == 32
888 #elif TX_RING_SIZE == 64
890 #elif TX_RING_SIZE == 128
892 #elif TX_RING_SIZE == 256
894 #elif TX_RING_SIZE == 512
896 #elif TX_RING_SIZE == 1024
898 #elif TX_RING_SIZE == 2048
900 #elif TX_RING_SIZE == 4096
902 #elif TX_RING_SIZE == 8192
[all …]
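The sungem.h ladder above maps the compile-time TX_RING_SIZE choice onto one of a fixed set of sizes the hardware supports. A minimal sketch of the same idiom, assuming a small invented RINGSZ_ENC encoding (the name and values are illustrative, not taken from the header); the #else branch turns an unsupported size into a build error:

    /* Compile-time mapping from a power-of-two ring size to a hardware
     * encoding, in the style of the sungem.h #if ladder.  Encoding values
     * are illustrative only. */
    #define TX_RING_SIZE 128

    #if TX_RING_SIZE == 32
    #define RINGSZ_ENC 0
    #elif TX_RING_SIZE == 64
    #define RINGSZ_ENC 1
    #elif TX_RING_SIZE == 128
    #define RINGSZ_ENC 2
    #elif TX_RING_SIZE == 256
    #define RINGSZ_ENC 3
    #else
    #error TX_RING_SIZE is not a size this (hypothetical) chip supports
    #endif

    enum { TX_RINGSZ_FIELD = RINGSZ_ENC };   /* value a driver would program */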
sunhme.h 331 #define TX_RING_SIZE 32 /* Must be >16 and <255, multiple of 16 */ macro
334 #if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
335 #error TX_RING_SIZE holds illegal value
361 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
363 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
367 (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
418 struct sk_buff *tx_skbs[TX_RING_SIZE];
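NEXT_TX()/PREV_TX() above wrap with a bitwise AND, which only works because TX_RING_SIZE is a power of two, and the expression at line 367 counts free slots while always leaving one descriptor unused. A self-contained restatement of that indexing scheme, assuming hypothetical struct and function names (tx_buffs_avail mirrors the shape of the driver's TX_BUFFS_AVAIL macro):

    #include <stdio.h>

    #define TX_RING_SIZE 32                      /* must be a power of two */
    #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
    #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))

    struct tx_ring_state {
        unsigned int tx_old;   /* oldest descriptor not yet reclaimed */
        unsigned int tx_new;   /* next descriptor to be filled */
    };

    /* Free descriptors; when tx_old <= tx_new the used region is
     * [tx_old, tx_new), otherwise it wraps around the end of the ring. */
    static unsigned int tx_buffs_avail(const struct tx_ring_state *hp)
    {
        return (hp->tx_old <= hp->tx_new) ?
            hp->tx_old + (TX_RING_SIZE - 1) - hp->tx_new :
            hp->tx_old - hp->tx_new - 1;
    }

    int main(void)
    {
        struct tx_ring_state hp = { .tx_old = 30, .tx_new = 3 };
        printf("next=%u prev=%u avail=%u\n",
               NEXT_TX(hp.tx_new), PREV_TX(hp.tx_old), tx_buffs_avail(&hp));
        return 0;
    }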
sunbmac.h 251 #define TX_RING_SIZE 256 macro
255 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
257 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
261 (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
300 struct sk_buff *tx_skbs[TX_RING_SIZE];
sunqe.h 291 #define TX_RING_SIZE 16 macro
301 (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
326 u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
/Linux-v4.19/drivers/net/ethernet/amd/
ariadne.c 80 #define TX_RING_SIZE 5 macro
88 volatile struct TDRE *tx_ring[TX_RING_SIZE];
90 volatile u_short *tx_buff[TX_RING_SIZE];
100 struct TDRE tx_ring[TX_RING_SIZE];
102 u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
129 for (i = 0; i < TX_RING_SIZE; i++) { in ariadne_init_ring()
309 int entry = dirty_tx % TX_RING_SIZE; in ariadne_interrupt()
345 if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) { in ariadne_interrupt()
349 dirty_tx += TX_RING_SIZE; in ariadne_interrupt()
354 dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) { in ariadne_interrupt()
[all …]
7990.h 39 #define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) macro
41 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
87 volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
90 volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
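The LANCE-family drivers (7990, atarilance, lance, sun3lance, declance below) all derive TX_RING_SIZE from a log2 constant, because the LANCE init block encodes ring lengths as a power-of-two exponent; the matching mask then gives cheap wrap-around. A compact sketch with an illustrative exponent value:

    /* LANCE-style ring sizing: the same log2 constant yields the ring
     * length, the wrap mask, and (in the real drivers) the value written
     * into the chip's init block.  The exponent here is illustrative. */
    #define LANCE_LOG_TX_BUFFERS 4
    #define TX_RING_SIZE         (1 << LANCE_LOG_TX_BUFFERS)   /* 16 */
    #define TX_RING_MOD_MASK     (TX_RING_SIZE - 1)

    /* Map a free-running descriptor counter onto a ring slot. */
    static inline unsigned int tx_slot(unsigned int counter)
    {
        return counter & TX_RING_MOD_MASK;
    }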
atarilance.c 111 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) macro
113 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
157 struct lance_tx_head tx_head[TX_RING_SIZE];
704 for( i = 0; i < TX_RING_SIZE; i++ ) { in lance_init_ring()
755 for( i = 0 ; i < TX_RING_SIZE; i++ ) in lance_tx_timeout()
828 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { in lance_start_xmit()
829 lp->cur_tx -= TX_RING_SIZE; in lance_start_xmit()
830 lp->dirty_tx -= TX_RING_SIZE; in lance_start_xmit()
919 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
923 dirty_tx += TX_RING_SIZE; in lance_interrupt()
[all …]
lance.c 193 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
194 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
238 struct lance_tx_head tx_ring[TX_RING_SIZE];
242 struct sk_buff* tx_skbuff[TX_RING_SIZE];
559 lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ, in lance_probe1()
850 for (i = 0; i < TX_RING_SIZE; i++) { in lance_purge_ring()
887 for (i = 0; i < TX_RING_SIZE; i++) { in lance_init_ring()
936 for (i = 0; i < TX_RING_SIZE; i++) in lance_tx_timeout()
1011 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) in lance_start_xmit()
1094 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
[all …]
sun3lance.c 94 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) macro
96 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
142 struct lance_tx_head tx_head[TX_RING_SIZE];
145 char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
464 for( i = 0; i < TX_RING_SIZE; i++ ) { in lance_init_ring()
551 for( i = 0 ; i < TX_RING_SIZE; i++ ) in lance_start_xmit()
declance.c 155 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
156 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
222 struct lance_tx_desc btx_ring[TX_RING_SIZE];
267 char *tx_buf_ptr_cpu[TX_RING_SIZE];
271 uint tx_buf_ptr_lnc[TX_RING_SIZE];
494 for (i = 0; i < TX_RING_SIZE; i++) { in lance_init_ring()
1093 for (i = 0; i < TX_RING_SIZE; i++) { in dec_lance_probe()
1138 for (i = 0; i < TX_RING_SIZE; i++) { in dec_lance_probe()
1169 for (i = 0; i < TX_RING_SIZE; i++) { in dec_lance_probe()
/Linux-v4.19/drivers/net/ethernet/pasemi/
pasemi_mac.h 30 #define TX_RING_SIZE 4096 macro
31 #define CS_RING_SIZE (TX_RING_SIZE*2)
105 #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
106 #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
/Linux-v4.19/drivers/net/ethernet/packetengines/
yellowfin.c 73 #define TX_RING_SIZE 16 macro
76 #define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
77 #define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
312 struct sk_buff* tx_skbuff[TX_RING_SIZE];
698 for (i = 0; i < TX_RING_SIZE; i++) in yellowfin_tx_timeout()
757 for (i = 0; i < TX_RING_SIZE; i++) { in yellowfin_init_ring()
761 ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc)); in yellowfin_init_ring()
768 for (i = 0; i < TX_RING_SIZE; i++) { in yellowfin_init_ring()
794 ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc)); in yellowfin_init_ring()
817 entry = yp->cur_tx % TX_RING_SIZE; in yellowfin_start_xmit()
[all …]
hamachi.c 119 #define TX_RING_SIZE 64 macro
121 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct hamachi_desc)
487 struct sk_buff* tx_skbuff[TX_RING_SIZE];
996 int entry = hmp->dirty_tx % TX_RING_SIZE; in hamachi_tx()
1011 if (entry >= TX_RING_SIZE-1) in hamachi_tx()
1012 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= in hamachi_tx()
1061 for (i = 0; i < TX_RING_SIZE; i++) in hamachi_tx_timeout()
1084 for (i = 0; i < TX_RING_SIZE; i++){ in hamachi_tx_timeout()
1087 if (i >= TX_RING_SIZE - 1) in hamachi_tx_timeout()
1195 for (i = 0; i < TX_RING_SIZE; i++) { in hamachi_init_ring()
[all …]
/Linux-v4.19/drivers/net/ethernet/dlink/
dl2k.h 38 #define TX_RING_SIZE 256 macro
39 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
41 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
372 struct sk_buff *tx_skbuff[TX_RING_SIZE];
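dl2k caps the entries it will actually use at TX_QUEUE_LEN = TX_RING_SIZE - 1, so that with both indices kept modulo the ring size a completely full ring stays distinguishable from an empty one. A small sketch of that occupancy arithmetic, reusing the cur_tx/old_tx naming from dl2k.c but otherwise illustrative:

    #include <stdbool.h>

    #define TX_RING_SIZE 256
    #define TX_QUEUE_LEN (TX_RING_SIZE - 1)   /* one slot stays unused */

    /* Descriptors currently owned by the hardware, for producer/consumer
     * indices that are always kept in the range [0, TX_RING_SIZE). */
    static unsigned int tx_in_flight(unsigned int cur_tx, unsigned int old_tx)
    {
        return (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE;
    }

    static bool tx_ring_full(unsigned int cur_tx, unsigned int old_tx)
    {
        return tx_in_flight(cur_tx, old_tx) >= TX_QUEUE_LEN;
    }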
sundance.c 68 #define TX_RING_SIZE 32 macro
69 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
72 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
374 struct sk_buff* tx_skbuff[TX_RING_SIZE];
988 for (i=0; i<TX_RING_SIZE; i++) { in tx_timeout()
1001 np->cur_tx, np->cur_tx % TX_RING_SIZE, in tx_timeout()
1002 np->dirty_tx, np->dirty_tx % TX_RING_SIZE); in tx_timeout()
1066 for (i = 0; i < TX_RING_SIZE; i++) { in init_ring()
1076 unsigned head = np->cur_task % TX_RING_SIZE; in tx_poll()
1078 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; in tx_poll()
[all …]
dl2k.c 222 else if (tx_coalesce > TX_RING_SIZE-1) in rio_probe1()
223 tx_coalesce = TX_RING_SIZE - 1; in rio_probe1()
458 for (i = 0; i < TX_RING_SIZE; i++) { in free_list()
478 for (i = 0; i < TX_RING_SIZE; i++) in rio_reset_ring()
495 for (i = 0; i < TX_RING_SIZE; i++) { in alloc_list()
498 ((i + 1) % TX_RING_SIZE) * in alloc_list()
727 entry = np->cur_tx % TX_RING_SIZE; in start_xmit()
764 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; in start_xmit()
765 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE in start_xmit()
826 int entry = np->old_tx % TX_RING_SIZE; in rio_free_tx()
[all …]
/Linux-v4.19/drivers/net/ethernet/dec/tulip/
tulip_core.c 598 for (i = 0; i < TX_RING_SIZE; i++) in tulip_tx_timeout()
654 for (i = 0; i < TX_RING_SIZE; i++) { in tulip_init_ring()
675 entry = tp->cur_tx % TX_RING_SIZE; in tulip_start_xmit()
683 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ in tulip_start_xmit()
685 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) { in tulip_start_xmit()
687 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) { in tulip_start_xmit()
693 if (entry == TX_RING_SIZE-1) in tulip_start_xmit()
718 int entry = dirty_tx % TX_RING_SIZE; in tulip_clean_tx_ring()
815 for (i = 0; i < TX_RING_SIZE; i++) { in tulip_free_ring()
1139 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) { in set_rx_mode()
[all …]
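tulip_start_xmit() (lines 683-687 above) grades each submission by ring occupancy: most descriptors are posted without requesting a completion interrupt, one interrupt is requested at the halfway mark, and the queue is stopped when only a couple of entries remain. A simplified sketch of that policy, with invented struct/function names and the actual descriptor flag writes left out:

    #include <stdbool.h>

    #define TX_RING_SIZE 32

    struct tx_submit_policy {
        bool want_tx_done_irq;   /* ask the chip to interrupt on completion */
        bool stop_queue;         /* tell the stack to hold further packets */
    };

    static struct tx_submit_policy tx_policy(unsigned int cur_tx,
                                             unsigned int dirty_tx)
    {
        unsigned int in_flight = cur_tx - dirty_tx;   /* free-running counters */
        struct tx_submit_policy p = { false, false };

        if (in_flight < TX_RING_SIZE / 2) {
            /* typical path: no completion interrupt */
        } else if (in_flight == TX_RING_SIZE / 2) {
            p.want_tx_done_irq = true;
        } else if (in_flight < TX_RING_SIZE - 2) {
            /* still room: no completion interrupt */
        } else {
            p.want_tx_done_irq = true;
            p.stop_queue = true;     /* leave the last entries in reserve */
        }
        return p;
    }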
interrupt.c 526 int maxtx = TX_RING_SIZE; in tulip_interrupt()
527 int maxoi = TX_RING_SIZE; in tulip_interrupt()
590 int entry = dirty_tx % TX_RING_SIZE; in tulip_interrupt()
644 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { in tulip_interrupt()
648 dirty_tx += TX_RING_SIZE; in tulip_interrupt()
652 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) in tulip_interrupt()
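Both tulip_clean_tx_ring() and tulip_interrupt() advance dirty_tx over completed descriptors and treat cur_tx - dirty_tx > TX_RING_SIZE as a bookkeeping error, which is safe because the counters are free-running and only reduced modulo the ring size at the point of use. A reduced sketch of that reclaim loop; the done[] array is a stand-in for the hardware-owned status word:

    #include <stdbool.h>

    #define TX_RING_SIZE 32

    struct tx_queue {
        unsigned int cur_tx;     /* next entry start_xmit will fill */
        unsigned int dirty_tx;   /* oldest entry not yet reclaimed */
        bool done[TX_RING_SIZE]; /* stand-in for per-descriptor status */
    };

    /* Reclaim finished descriptors in order; returns how many completed. */
    static unsigned int clean_tx_ring(struct tx_queue *tp)
    {
        unsigned int freed = 0;

        while (tp->cur_tx - tp->dirty_tx > 0) {
            unsigned int entry = tp->dirty_tx % TX_RING_SIZE;

            if (!tp->done[entry])      /* still owned by the chip */
                break;
            tp->done[entry] = false;
            tp->dirty_tx++;
            freed++;
        }
        /* Mirrors the consistency check in tulip_interrupt(): the counters
         * should never drift more than a full ring apart. */
        if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE)
            tp->dirty_tx += TX_RING_SIZE;
        return freed;
    }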
winbond-840.c 298 dma_addr_t tx_addr[TX_RING_SIZE];
303 struct sk_buff* tx_skbuff[TX_RING_SIZE];
826 for (i = 0; i < TX_RING_SIZE; i++) { in init_rxtx_rings()
854 for (i = 0; i < TX_RING_SIZE; i++) { in free_rxtx_rings()
940 for (i = 0; i < TX_RING_SIZE; i++) in tx_timeout()
979 sizeof(struct w840_tx_desc)*TX_RING_SIZE, in alloc_ringdesc()
991 sizeof(struct w840_tx_desc)*TX_RING_SIZE, in free_ringdesc()
1005 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1020 if(entry == TX_RING_SIZE-1) in start_tx()
1062 int entry = np->dirty_tx % TX_RING_SIZE; in netdev_tx_done()
[all …]
/Linux-v4.19/drivers/net/ethernet/3com/
3c515.c 56 #define TX_RING_SIZE 16 macro
306 struct boom_tx_desc tx_ring[TX_RING_SIZE];
309 struct sk_buff *tx_skbuff[TX_RING_SIZE];
845 for (i = 0; i < TX_RING_SIZE; i++) in corkscrew_open()
983 for (i = 0; i < TX_RING_SIZE; i++) { in corkscrew_timeout()
1013 int entry = vp->cur_tx % TX_RING_SIZE; in corkscrew_start_xmit()
1021 prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE]; in corkscrew_start_xmit()
1051 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) in corkscrew_start_xmit()
1176 int entry = dirty_tx % TX_RING_SIZE; in corkscrew_interrupt()
1187 if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) { in corkscrew_interrupt()
[all …]
/Linux-v4.19/drivers/net/wan/
dscc4.c 164 #define TX_RING_SIZE 32 macro
166 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct TxFD)
201 struct sk_buff *tx_skbuff[TX_RING_SIZE];
428 ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD); in dscc4_do_tx()
493 for (i = 0; i < TX_RING_SIZE; i++) { in dscc4_release_ring()
1150 next = dpriv->tx_current%TX_RING_SIZE; in dscc4_start_xmit()
1168 if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)) in dscc4_start_xmit()
1541 if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE) in dscc4_tx_irq()
1567 cur = dpriv->tx_dirty%TX_RING_SIZE; in dscc4_tx_irq()
1644 (dpriv->tx_dirty%TX_RING_SIZE)* in dscc4_tx_irq()
[all …]
/Linux-v4.19/drivers/net/ethernet/adaptec/
starfire.c 123 #define TX_RING_SIZE 32 macro
539 struct tx_ring_info tx_info[TX_RING_SIZE];
901 …tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE… in netdev_open()
1182 for (i = 0; i < TX_RING_SIZE; i++) in init_ring()
1199 if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) { in start_tx()
1212 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1220 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) { in start_tx()
1258 np->tx_info[entry].used_slots = TX_RING_SIZE - entry; in start_tx()
1267 if (np->cur_tx % (TX_RING_SIZE / 2) == 0) in start_tx()
1280 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE) in start_tx()
[all …]
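starfire's start_tx() (line 1199 above) refuses to touch the ring unless the whole packet fits, budgeting two ring entries per fragment. A minimal restatement of that admission check; nfrags stands in for the driver's own skb_num_frags() helper, and TX_RING_SIZE matches the value shown at line 123:

    #include <stdbool.h>

    #define TX_RING_SIZE 32

    /* True when a packet needing nfrags fragments can be queued without
     * overrunning the ring, counting two entries per fragment as the
     * driver's check does. */
    static bool tx_has_room(unsigned int cur_tx, unsigned int dirty_tx,
                            unsigned int nfrags)
    {
        return (cur_tx - dirty_tx) + nfrags * 2 <= TX_RING_SIZE;
    }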
/Linux-v4.19/drivers/net/ethernet/freescale/
fec.h 348 #define TX_RING_SIZE 512 /* Must be power of two */ macro
475 unsigned char *tx_bounce[TX_RING_SIZE];
476 struct sk_buff *tx_skbuff[TX_RING_SIZE];
/Linux-v4.19/drivers/net/ethernet/via/
via-rhine.c 79 #define TX_RING_SIZE 64 macro
80 #define TX_QUEUE_LEN (TX_RING_SIZE - 6) /* Limit ring entries actually used. */
452 struct sk_buff *tx_skbuff[TX_RING_SIZE];
453 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
456 unsigned char *tx_buf[TX_RING_SIZE];
1165 TX_RING_SIZE * sizeof(struct tx_desc), in alloc_ring()
1174 PKT_BUF_SZ * TX_RING_SIZE, in alloc_ring()
1180 TX_RING_SIZE * sizeof(struct tx_desc), in alloc_ring()
1201 TX_RING_SIZE * sizeof(struct tx_desc), in free_ring()
1206 dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE, in free_ring()
[all …]
/Linux-v4.19/drivers/net/ethernet/smsc/
smsc9420.c 516 for (i = 0; i < TX_RING_SIZE; i++) { in smsc9420_free_tx_ring()
935 pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE; in smsc9420_complete_tx()
947 (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail); in smsc9420_hard_start_xmit()
974 if (unlikely(index == (TX_RING_SIZE - 1))) in smsc9420_hard_start_xmit()
982 pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE; in smsc9420_hard_start_xmit()
1197 pd->tx_buffers = kmalloc_array(TX_RING_SIZE, in smsc9420_alloc_tx_ring()
1204 for (i = 0; i < TX_RING_SIZE; i++) { in smsc9420_alloc_tx_ring()
1212 pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_; in smsc9420_alloc_tx_ring()
1583 sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE, in smsc9420_probe()
1641 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); in smsc9420_probe()
[all …]
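smsc9420 keeps tx_ring_head/tx_ring_tail modulo TX_RING_SIZE, treats the ring as full two slots early (the test at line 947 above), and flags only the last descriptor as end-of-ring so the DMA engine wraps (line 1212). A self-contained sketch of that bookkeeping, assuming an invented descriptor layout, ring size and END_OF_RING bit:

    #include <stdbool.h>
    #include <stdint.h>

    #define TX_RING_SIZE 32
    #define END_OF_RING  (1u << 25)   /* illustrative wrap-marker bit */

    struct tx_desc {
        uint32_t status;
        uint32_t length;
    };

    struct tx_ring {
        struct tx_desc desc[TX_RING_SIZE];
        unsigned int head;   /* next slot handed to the hardware */
        unsigned int tail;   /* next slot expected to complete */
    };

    static void tx_ring_init(struct tx_ring *r)
    {
        for (unsigned int i = 0; i < TX_RING_SIZE; i++) {
            r->desc[i].status = 0;
            r->desc[i].length = 0;
        }
        r->desc[TX_RING_SIZE - 1].length |= END_OF_RING;   /* wrap marker */
        r->head = 0;
        r->tail = 0;
    }

    static bool tx_ring_full(const struct tx_ring *r)
    {
        return ((r->head + 2) % TX_RING_SIZE) == r->tail;
    }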
