
Searched refs:TX_RING_SIZE (Results 1 – 25 of 51) sorted by relevance


/linux/drivers/net/ethernet/sun/
sungem.h
883 #define TX_RING_SIZE 128 macro
886 #if TX_RING_SIZE == 32
888 #elif TX_RING_SIZE == 64
890 #elif TX_RING_SIZE == 128
892 #elif TX_RING_SIZE == 256
894 #elif TX_RING_SIZE == 512
896 #elif TX_RING_SIZE == 1024
898 #elif TX_RING_SIZE == 2048
900 #elif TX_RING_SIZE == 4096
902 #elif TX_RING_SIZE == 8192
[all …]
sunhme.h
331 #define TX_RING_SIZE 32 /* Must be >16 and <255, multiple of 16 */ macro
334 #if (TX_RING_SIZE < 16 || TX_RING_SIZE > 256 || (TX_RING_SIZE % 16) != 0)
335 #error TX_RING_SIZE holds illegal value
361 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
363 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
367 (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
418 struct sk_buff *tx_skbs[TX_RING_SIZE];
sunbmac.h
251 #define TX_RING_SIZE 256 macro
255 #define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
257 #define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))
261 (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
300 struct sk_buff *tx_skbs[TX_RING_SIZE];
sunqe.h
291 #define TX_RING_SIZE 16 macro
301 (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
326 u8 tx_buf[TX_RING_SIZE][PKT_BUF_SZ];
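The Sun drivers above all pick power-of-two ring sizes, so sunhme.h and sunbmac.h can wrap indices with a mask in NEXT_TX()/PREV_TX() and derive the number of free descriptors from the tx_old/tx_new cursors. A minimal, self-contained sketch of that pattern, assuming the same cursor convention (the tx_buffs_avail() helper name and the ring size used here are illustrative, not taken from the drivers):

#include <assert.h>
#include <stdio.h>

#define TX_RING_SIZE 256                      /* must be a power of two */
#define NEXT_TX(num) (((num) + 1) & (TX_RING_SIZE - 1))
#define PREV_TX(num) (((num) - 1) & (TX_RING_SIZE - 1))

/* Free descriptors between the consumer (tx_old) and producer (tx_new),
 * keeping one slot unused so "full" and "empty" stay distinguishable. */
static int tx_buffs_avail(int tx_old, int tx_new)
{
    return tx_old <= tx_new
        ? tx_old + (TX_RING_SIZE - 1) - tx_new
        : tx_old - tx_new - 1;
}

int main(void)
{
    assert(NEXT_TX(TX_RING_SIZE - 1) == 0);   /* wraps back to slot 0 */
    assert(PREV_TX(0) == TX_RING_SIZE - 1);
    printf("free slots: %d\n", tx_buffs_avail(10, 10));  /* empty ring: 255 */
    return 0;
}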
/linux/drivers/net/ethernet/amd/
ariadne.c
80 #define TX_RING_SIZE 5 macro
90 volatile u_short *tx_buff[TX_RING_SIZE];
100 struct TDRE tx_ring[TX_RING_SIZE];
129 for (i = 0; i < TX_RING_SIZE; i++) { in ariadne_init_ring()
309 int entry = dirty_tx % TX_RING_SIZE; in ariadne_interrupt()
349 dirty_tx += TX_RING_SIZE; in ariadne_interrupt()
576 entry = priv->cur_tx % TX_RING_SIZE; in ariadne_start_xmit()
597 if ((priv->cur_tx >= TX_RING_SIZE) && in ariadne_start_xmit()
598 (priv->dirty_tx >= TX_RING_SIZE)) { in ariadne_start_xmit()
603 priv->cur_tx -= TX_RING_SIZE; in ariadne_start_xmit()
[all …]
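ariadne.c uses a ring size that is not a power of two (TX_RING_SIZE 5), so entries are selected with %, and once both free-running counters have passed TX_RING_SIZE they are pulled back together, as the snippets at lines 597-603 above suggest (atarilance.c does the same at 829-831). A rough sketch of that rebase, assuming free-running cur_tx/dirty_tx counters:

#include <stdio.h>

#define TX_RING_SIZE 5   /* not a power of two, so masking cannot be used */

int main(void)
{
    unsigned cur_tx = 7, dirty_tx = 6;          /* free-running counters */

    unsigned entry = cur_tx % TX_RING_SIZE;     /* slot actually written */
    printf("entry %u, in flight %u\n", entry, cur_tx - dirty_tx);

    /* Once both counters have passed TX_RING_SIZE, rebase them together
     * so their difference (the in-flight count) is preserved. */
    if (cur_tx >= TX_RING_SIZE && dirty_tx >= TX_RING_SIZE) {
        cur_tx -= TX_RING_SIZE;
        dirty_tx -= TX_RING_SIZE;
    }
    printf("after rebase: cur_tx=%u dirty_tx=%u\n", cur_tx, dirty_tx);
    return 0;
}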
7990.h
39 #define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS) macro
41 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
87 volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
90 volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
atarilance.c
111 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) macro
113 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
157 struct lance_tx_head tx_head[TX_RING_SIZE];
704 for( i = 0; i < TX_RING_SIZE; i++ ) { in lance_init_ring()
755 for( i = 0 ; i < TX_RING_SIZE; i++ ) in lance_tx_timeout()
829 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { in lance_start_xmit()
830 lp->cur_tx -= TX_RING_SIZE; in lance_start_xmit()
831 lp->dirty_tx -= TX_RING_SIZE; in lance_start_xmit()
920 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
924 dirty_tx += TX_RING_SIZE; in lance_interrupt()
[all …]
lance.c
194 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
195 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
239 struct lance_tx_head tx_ring[TX_RING_SIZE];
243 struct sk_buff* tx_skbuff[TX_RING_SIZE];
855 for (i = 0; i < TX_RING_SIZE; i++) { in lance_purge_ring()
892 for (i = 0; i < TX_RING_SIZE; i++) { in lance_init_ring()
941 for (i = 0; i < TX_RING_SIZE; i++) in lance_tx_timeout()
1016 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) in lance_start_xmit()
1099 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) { in lance_interrupt()
1103 dirty_tx += TX_RING_SIZE; in lance_interrupt()
[all …]
sun3lance.c
94 #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) macro
96 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
142 struct lance_tx_head tx_head[TX_RING_SIZE];
145 char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
459 for( i = 0; i < TX_RING_SIZE; i++ ) { in lance_init_ring()
547 for( i = 0 ; i < TX_RING_SIZE; i++ ) in lance_start_xmit()
declance.c
156 #define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS)) macro
157 #define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
223 struct lance_tx_desc btx_ring[TX_RING_SIZE];
268 char *tx_buf_ptr_cpu[TX_RING_SIZE];
272 uint tx_buf_ptr_lnc[TX_RING_SIZE];
495 for (i = 0; i < TX_RING_SIZE; i++) { in lance_init_ring()
1095 for (i = 0; i < TX_RING_SIZE; i++) { in dec_lance_probe()
1140 for (i = 0; i < TX_RING_SIZE; i++) { in dec_lance_probe()
1171 for (i = 0; i < TX_RING_SIZE; i++) { in dec_lance_probe()
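The LANCE-family drivers above (7990.h, atarilance.c, lance.c, sun3lance.c, declance.c) all derive TX_RING_SIZE from a log2 constant and wrap their free-running counters with TX_RING_MOD_MASK. A small sketch of that arrangement; the LANCE_LOG_TX_BUFFERS value here is only for illustration:

#include <stdio.h>

#define LANCE_LOG_TX_BUFFERS 4                        /* illustrative value */
#define TX_RING_SIZE        (1 << LANCE_LOG_TX_BUFFERS)
#define TX_RING_MOD_MASK    (TX_RING_SIZE - 1)

int main(void)
{
    /* Monotonically increasing producer counter; the mask picks the slot. */
    for (unsigned cur_tx = 14; cur_tx < 18; cur_tx++)
        printf("cur_tx=%u -> ring entry %u\n", cur_tx, cur_tx & TX_RING_MOD_MASK);
    return 0;
}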
/linux/drivers/net/ethernet/pasemi/
pasemi_mac.h
19 #define TX_RING_SIZE 4096 macro
20 #define CS_RING_SIZE (TX_RING_SIZE*2)
94 #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
95 #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
/linux/drivers/net/ethernet/packetengines/
yellowfin.c
73 #define TX_RING_SIZE 16 macro
312 struct sk_buff* tx_skbuff[TX_RING_SIZE];
705 for (i = 0; i < TX_RING_SIZE; i++) in yellowfin_tx_timeout()
766 for (i = 0; i < TX_RING_SIZE; i++) { in yellowfin_init_ring()
777 for (i = 0; i < TX_RING_SIZE; i++) { in yellowfin_init_ring()
826 entry = yp->cur_tx % TX_RING_SIZE; in yellowfin_start_xmit()
847 if (entry >= TX_RING_SIZE-1) { in yellowfin_start_xmit()
1008 dirty_tx += TX_RING_SIZE; in yellowfin_interrupt()
1231 for (i = 0; i < TX_RING_SIZE*2; i++) in yellowfin_close()
1237 for (i = 0; i < TX_RING_SIZE; i++) in yellowfin_close()
[all …]
hamachi.c
119 #define TX_RING_SIZE 64 macro
487 struct sk_buff* tx_skbuff[TX_RING_SIZE];
1017 if (entry >= TX_RING_SIZE-1) in hamachi_tx()
1067 for (i = 0; i < TX_RING_SIZE; i++) in hamachi_tx_timeout()
1090 for (i = 0; i < TX_RING_SIZE; i++){ in hamachi_tx_timeout()
1093 if (i >= TX_RING_SIZE - 1) in hamachi_tx_timeout()
1206 for (i = 0; i < TX_RING_SIZE; i++) { in hamachi_init_ring()
1243 entry = hmp->cur_tx % TX_RING_SIZE; in hamachi_start_xmit()
1357 if (entry >= TX_RING_SIZE-1) in hamachi_interrupt()
1688 for (i = 0; i < TX_RING_SIZE; i++) in hamachi_close()
[all …]
/linux/drivers/net/ethernet/dlink/
dl2k.h
35 #define TX_RING_SIZE 256 macro
36 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used.*/
38 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
369 struct sk_buff *tx_skbuff[TX_RING_SIZE];
sundance.c
65 #define TX_RING_SIZE 32 macro
366 struct sk_buff* tx_skbuff[TX_RING_SIZE];
977 for (i=0; i<TX_RING_SIZE; i++) { in tx_timeout()
990 np->cur_tx, np->cur_tx % TX_RING_SIZE, in tx_timeout()
1055 for (i = 0; i < TX_RING_SIZE; i++) { in init_ring()
1070 int entry = np->cur_task % TX_RING_SIZE; in tx_poll()
1094 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1148 for (i = 0; i < TX_RING_SIZE; i++) { in reset_tx()
1269 TX_RING_SIZE) in intr_handler()
1869 for (i = 0; i < TX_RING_SIZE; i++) in netdev_close()
[all …]
dl2k.c
210 else if (tx_coalesce > TX_RING_SIZE-1) in rio_probe1()
211 tx_coalesce = TX_RING_SIZE - 1; in rio_probe1()
450 for (i = 0; i < TX_RING_SIZE; i++) { in free_list()
471 for (i = 0; i < TX_RING_SIZE; i++) in rio_reset_ring()
488 for (i = 0; i < TX_RING_SIZE; i++) { in alloc_list()
491 ((i + 1) % TX_RING_SIZE) * in alloc_list()
717 entry = np->cur_tx % TX_RING_SIZE; in start_xmit()
754 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE in start_xmit()
815 int entry = np->old_tx % TX_RING_SIZE; in rio_free_tx()
839 entry = (entry + 1) % TX_RING_SIZE; in rio_free_tx()
[all …]
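dl2k.h limits the usable entries to TX_QUEUE_LEN = TX_RING_SIZE - 1, and dl2k.c measures ring occupancy with a wrap-safe modulo of the cur_tx/old_tx counters (line 754 above). A hedged sketch of that occupancy test; the stop-queue decision is simplified and not the driver's exact flow:

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 256
#define TX_QUEUE_LEN (TX_RING_SIZE - 1)   /* limit ring entries actually used */

/* Entries currently in flight between old_tx (cleaned) and cur_tx (queued). */
static unsigned tx_in_flight(unsigned cur_tx, unsigned old_tx)
{
    return (cur_tx - old_tx + TX_RING_SIZE) % TX_RING_SIZE;
}

int main(void)
{
    unsigned cur_tx = 5, old_tx = 250;           /* producer has wrapped */
    bool stop_queue = tx_in_flight(cur_tx, old_tx) >= TX_QUEUE_LEN;
    printf("in flight: %u, stop queue: %d\n",
           tx_in_flight(cur_tx, old_tx), stop_queue);
    return 0;
}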
/linux/drivers/net/ethernet/dec/tulip/
tulip_core.c
587 for (i = 0; i < TX_RING_SIZE; i++) in tulip_tx_timeout()
643 for (i = 0; i < TX_RING_SIZE; i++) { in tulip_init_ring()
664 entry = tp->cur_tx % TX_RING_SIZE; in tulip_start_xmit()
682 if (entry == TX_RING_SIZE-1) in tulip_start_xmit()
707 int entry = dirty_tx % TX_RING_SIZE; in tulip_clean_tx_ring()
805 for (i = 0; i < TX_RING_SIZE; i++) { in tulip_free_ring()
1129 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) { in set_rx_mode()
1137 entry = tp->cur_tx++ % TX_RING_SIZE; in set_rx_mode()
1148 entry = tp->cur_tx++ % TX_RING_SIZE; in set_rx_mode()
1159 if (entry == TX_RING_SIZE-1) in set_rx_mode()
[all …]
interrupt.c
533 int maxtx = TX_RING_SIZE; in tulip_interrupt()
534 int maxoi = TX_RING_SIZE; in tulip_interrupt()
597 int entry = dirty_tx % TX_RING_SIZE; in tulip_interrupt()
652 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { in tulip_interrupt()
656 dirty_tx += TX_RING_SIZE; in tulip_interrupt()
660 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) in tulip_interrupt()
winbond-840.c
288 dma_addr_t tx_addr[TX_RING_SIZE];
293 struct sk_buff* tx_skbuff[TX_RING_SIZE];
816 for (i = 0; i < TX_RING_SIZE; i++) { in init_rxtx_rings()
843 for (i = 0; i < TX_RING_SIZE; i++) { in free_rxtx_rings()
927 for (i = 0; i < TX_RING_SIZE; i++) in tx_timeout()
966 sizeof(struct w840_tx_desc) * TX_RING_SIZE, in alloc_ringdesc()
978 sizeof(struct w840_tx_desc) * TX_RING_SIZE, in free_ringdesc()
992 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1007 if(entry == TX_RING_SIZE-1) in start_tx()
1049 int entry = np->dirty_tx % TX_RING_SIZE; in netdev_tx_done()
[all …]
/linux/drivers/net/ethernet/3com/
3c515.c
48 #define TX_RING_SIZE 16 macro
299 struct boom_tx_desc tx_ring[TX_RING_SIZE];
302 struct sk_buff *tx_skbuff[TX_RING_SIZE];
832 for (i = 0; i < TX_RING_SIZE; i++) in corkscrew_open()
970 for (i = 0; i < TX_RING_SIZE; i++) { in corkscrew_timeout()
1000 int entry = vp->cur_tx % TX_RING_SIZE; in corkscrew_start_xmit()
1008 prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE]; in corkscrew_start_xmit()
1038 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) in corkscrew_start_xmit()
1163 int entry = dirty_tx % TX_RING_SIZE; in corkscrew_interrupt()
1174 if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) { in corkscrew_interrupt()
[all …]
3c59x.c
38 #define TX_RING_SIZE 16 macro
603 struct sk_buff* tx_skbuff[TX_RING_SIZE];
1478 sizeof(struct boom_tx_desc) * TX_RING_SIZE, in vortex_probe1()
1685 for (i = 0; i < TX_RING_SIZE; i++) in vortex_up()
2116 int entry = vp->cur_tx % TX_RING_SIZE; in boomerang_start_xmit()
2422 int entry = dirty_tx % TX_RING_SIZE; in _boomerang_interrupt()
2764 for (i = 0; i < TX_RING_SIZE; i++) { in vortex_close()
2800 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE, in dump_tx_ring()
2801 vp->cur_tx, vp->cur_tx % TX_RING_SIZE); in dump_tx_ring()
2806 for (i = 0; i < TX_RING_SIZE; i++) { in dump_tx_ring()
[all …]
/linux/drivers/net/ethernet/adaptec/
starfire.c
121 #define TX_RING_SIZE 32 macro
531 struct tx_ring_info tx_info[TX_RING_SIZE];
1173 for (i = 0; i < TX_RING_SIZE; i++) in init_ring()
1203 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
1211 if (entry >= TX_RING_SIZE - skb_num_frags(skb)) { in start_tx()
1250 np->tx_info[entry].used_slots = TX_RING_SIZE - entry; in start_tx()
1259 if (np->cur_tx % (TX_RING_SIZE / 2) == 0) in start_tx()
1272 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE) in start_tx()
1278 entry = prev_tx % TX_RING_SIZE; in start_tx()
1391 (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) { in intr_handler()
[all …]
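starfire.c checks at line 1211 whether a packet's descriptors, starting at the current entry, would run past the end of the ring (entry >= TX_RING_SIZE - skb_num_frags(skb)). A rough sketch of just that fit test, with a hypothetical nr_frags count standing in for skb_num_frags(); how the driver then handles the wrap is not reproduced here:

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 32

/* True if a packet needing 1 + nr_frags descriptors, starting at 'entry',
 * would run past the last slot of the ring. */
static bool tx_would_wrap(unsigned entry, unsigned nr_frags)
{
    return entry >= TX_RING_SIZE - (nr_frags + 1);
}

int main(void)
{
    printf("%d\n", tx_would_wrap(30, 2));  /* needs slots 30..32 -> wraps */
    printf("%d\n", tx_would_wrap(28, 2));  /* needs slots 28..30 -> fits  */
    return 0;
}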
/linux/drivers/net/ethernet/via/
via-rhine.c
77 #define TX_RING_SIZE 64 macro
446 struct sk_buff *tx_skbuff[TX_RING_SIZE];
447 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
450 unsigned char *tx_buf[TX_RING_SIZE];
1147 TX_RING_SIZE * sizeof(struct tx_desc), in alloc_ring()
1156 PKT_BUF_SZ * TX_RING_SIZE, in alloc_ring()
1183 TX_RING_SIZE * sizeof(struct tx_desc), in free_ring()
1309 for (i = 0; i < TX_RING_SIZE; i++) { in alloc_tbufs()
1329 for (i = 0; i < TX_RING_SIZE; i++) { in free_tbufs()
1781 entry = rp->cur_tx % TX_RING_SIZE; in rhine_start_tx()
[all …]
/linux/drivers/net/ethernet/smsc/
smsc9420.c
498 for (i = 0; i < TX_RING_SIZE; i++) { in smsc9420_free_tx_ring()
921 pd->tx_ring_tail = (pd->tx_ring_tail + 1) % TX_RING_SIZE; in smsc9420_complete_tx()
933 (((pd->tx_ring_head + 2) % TX_RING_SIZE) == pd->tx_ring_tail); in smsc9420_hard_start_xmit()
960 if (unlikely(index == (TX_RING_SIZE - 1))) in smsc9420_hard_start_xmit()
968 pd->tx_ring_head = (pd->tx_ring_head + 1) % TX_RING_SIZE; in smsc9420_hard_start_xmit()
1182 pd->tx_buffers = kmalloc_array(TX_RING_SIZE, in smsc9420_alloc_tx_ring()
1189 for (i = 0; i < TX_RING_SIZE; i++) { in smsc9420_alloc_tx_ring()
1197 pd->tx_ring[TX_RING_SIZE - 1].length = TDES1_TER_; in smsc9420_alloc_tx_ring()
1550 sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE), in smsc9420_probe()
1608 sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE), in smsc9420_probe()
[all …]
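smsc9420.c keeps explicit tx_ring_head/tx_ring_tail indices rather than free-running counters, advances them modulo TX_RING_SIZE, and treats the ring as full when the head, advanced by two, would land on the tail (line 933 above). A minimal sketch of that full test, with illustrative struct and function names:

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 32   /* illustrative size; the driver defines its own */

struct tx_ring {
    unsigned head;        /* next slot the driver will fill */
    unsigned tail;        /* next slot the completion path will reclaim */
};

/* Leave a two-slot gap so the head never catches up with the tail while a
 * descriptor is still being completed. */
static bool tx_ring_full(const struct tx_ring *r)
{
    return ((r->head + 2) % TX_RING_SIZE) == r->tail;
}

int main(void)
{
    struct tx_ring r = { .head = 30, .tail = 0 };
    printf("full: %d\n", tx_ring_full(&r));      /* (30+2)%32 == 0 -> full */
    return 0;
}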
epic100.c
53 #define TX_RING_SIZE 256 macro
56 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
256 struct sk_buff* tx_skbuff[TX_RING_SIZE];
815 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc)); in epic_restart()
937 for (i = 0; i < TX_RING_SIZE; i++) { in epic_init_ring()
963 entry = ep->cur_tx % TX_RING_SIZE; in epic_start_xmit()
1033 int entry = dirty_tx % TX_RING_SIZE; in epic_tx()
1056 if (cur_tx - dirty_tx > TX_RING_SIZE) { in epic_tx()
1059 dirty_tx += TX_RING_SIZE; in epic_tx()
1316 for (i = 0; i < TX_RING_SIZE; i++) { in epic_close()
