xref: /netbsd/sys/dev/pci/if_bgevar.h (revision d04379de)
1 /*	$NetBSD: if_bgevar.h,v 1.41 2023/02/21 22:13:02 andvar Exp $	*/
2 /*
3  * Copyright (c) 2001 Wind River Systems
4  * Copyright (c) 1997, 1998, 1999, 2001
5  *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by Bill Paul.
18  * 4. Neither the name of the author nor the names of any co-contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32  * THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  * $FreeBSD: if_bgereg.h,v 1.1.2.7 2002/11/02 18:17:55 mp Exp $
35  */
36 
37 /*
38  * BCM570x memory map. The internal memory layout varies somewhat
39  * depending on whether or not we have external SSRAM attached.
40  * The BCM5700 can have up to 16MB of external memory. The BCM5701
41  * is apparently not designed to use external SSRAM. The mappings
42  * up to the first 4 send rings are the same for both internal and
43  * external memory configurations. Note that mini RX ring space is
44  * only available with external SSRAM configurations, which means
45  * the mini RX ring is not supported on the BCM5701.
46  *
47  * The NIC's memory can be accessed by the host in one of 3 ways:
48  *
49  * 1) Indirect register access. The MEMWIN_BASEADDR and MEMWIN_DATA
50  *    registers in PCI config space can be used to read any 32-bit
51  *    address within the NIC's memory.
52  *
53  * 2) Memory window access. The MEMWIN_BASEADDR register in PCI config
54  *    space can be used in conjunction with the memory window in the
55  *    device register space at offset 0x8000 to read any 32K chunk
56  *    of NIC memory.
57  *
58  * 3) Flat mode. If the 'flat mode' bit in the PCI state register is
59  *    set, the device I/O mapping consumes 32MB of host address space,
60  *    allowing all of the registers and internal NIC memory to be
61  *    accessed directly. NIC memory addresses are offset by 0x01000000.
62  *    Flat mode consumes so much host address space that it is not
63  *    recommended.
64  */
65 
66 #ifndef _DEV_PCI_IF_BGEVAR_H_
67 #define _DEV_PCI_IF_BGEVAR_H_
68 
69 #include <sys/bus.h>
70 #include <sys/rndsource.h>
71 #include <sys/time.h>
72 
73 #include <net/if_ether.h>
74 
75 #include <dev/pci/pcivar.h>
76 
/*
 * BGE_HOSTADDR(x, y): store 64-bit bus address 'y' into hostaddr
 * structure 'x' as two 32-bit halves.  On platforms with a 32-bit
 * bus_addr_t the high word is forced to zero; the sizeof test is a
 * compile-time constant, so the dead branch is optimized away.
 */
#define BGE_HOSTADDR(x, y)						      \
	do {								      \
		(x).bge_addr_lo = BUS_ADDR_LO32(y);			      \
		if (sizeof (bus_addr_t) == 8)				      \
			(x).bge_addr_hi = BUS_ADDR_HI32(y);		      \
		else							      \
			(x).bge_addr_hi = 0;				      \
	} while(0)

/*
 * RCB_WRITE_4(sc, rcb, offset, val): write 'val' to the named member
 * ('offset') of the ring control block located at device offset 'rcb'.
 */
#define RCB_WRITE_4(sc, rcb, offset, val)				      \
	bus_space_write_4(sc->bge_btag, sc->bge_bhandle,		      \
			  rcb + offsetof(struct bge_rcb, offset), val)
89 
90 /*
91  * Other utility macros.
92  */
/*
 * BGE_INC(x, y): advance ring index 'x' by one, wrapping at ring
 * size 'y'.  All macro arguments are fully parenthesized: the old
 * expansion "(x) = (x + 1) % y" mis-bound any expression argument
 * passed as 'y' (e.g. "a + b" became "(x + 1) % a + b").
 */
#define BGE_INC(x, y)	(x) = (((x) + 1) % (y))
94 
95 /*
96  * Register access macros. The Tigon always uses memory mapped register
97  * accesses and all registers must be accessed with 32 bit operations.
98  */
99 
/* Write a 32-bit device register. */
#define CSR_WRITE_4(sc, reg, val)					      \
	bus_space_write_4(sc->bge_btag, sc->bge_bhandle, reg, val)

/* Read a 32-bit device register. */
#define CSR_READ_4(sc, reg)						      \
	bus_space_read_4(sc->bge_btag, sc->bge_bhandle, reg)

/*
 * Write a register, then read it back so the posted write reaches the
 * device before the caller proceeds.
 */
#define CSR_WRITE_4_FLUSH(sc, reg, val)					      \
	do {								      \
		CSR_WRITE_4(sc, reg, val);				      \
		CSR_READ_4(sc, reg);					      \
	} while (0)

/* Read-modify-write: set bit mask 'x' in register 'reg'. */
#define BGE_SETBIT(sc, reg, x)						      \
	CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) | (x)))
/* As BGE_SETBIT, with a read-back to flush the posted write. */
#define BGE_SETBIT_FLUSH(sc, reg, x)					      \
	do {								      \
		BGE_SETBIT(sc, reg, x);					      \
		CSR_READ_4(sc, reg);					      \
	} while (0)
/* Read-modify-write: clear bit mask 'x' in register 'reg'. */
#define BGE_CLRBIT(sc, reg, x)						      \
	CSR_WRITE_4(sc, reg, (CSR_READ_4(sc, reg) & ~(x)))
/* As BGE_CLRBIT, with a read-back to flush the posted write. */
#define BGE_CLRBIT_FLUSH(sc, reg, x)					      \
	do {								      \
		BGE_CLRBIT(sc, reg, x);					      \
		CSR_READ_4(sc, reg);					      \
	} while (0)
126 
/* BAR2 APE register access macros. */
#define	APE_WRITE_4(sc, reg, val)					      \
	bus_space_write_4(sc->bge_apetag, sc->bge_apehandle, reg, val)

#define	APE_READ_4(sc, reg)						      \
	bus_space_read_4(sc->bge_apetag, sc->bge_apehandle, reg)

/* Write an APE register, then read it back to flush the posted write. */
#define	APE_WRITE_4_FLUSH(sc, reg, val)					      \
	do {								      \
		APE_WRITE_4(sc, reg, val);				      \
		APE_READ_4(sc, reg);					      \
	} while (0)

/* Read-modify-write: set/clear bit mask 'x' in an APE register. */
#define	APE_SETBIT(sc, reg, x)						      \
	APE_WRITE_4(sc, reg, (APE_READ_4(sc, reg) | (x)))
#define	APE_CLRBIT(sc, reg, x)						      \
	APE_WRITE_4(sc, reg, (APE_READ_4(sc, reg) & ~(x)))

/* Read-modify-write: set/clear bit mask 'x' in PCI config space. */
#define PCI_SETBIT(pc, tag, reg, x)					      \
	pci_conf_write(pc, tag, reg, (pci_conf_read(pc, tag, reg) | (x)))
#define PCI_CLRBIT(pc, tag, reg, x)					      \
	pci_conf_write(pc, tag, reg, (pci_conf_read(pc, tag, reg) & ~(x)))
149 
150 /*
151  * Memory management stuff. Note: the SSLOTS, MSLOTS and JSLOTS
152  * values are tuneable. They control the actual amount of buffers
153  * allocated for the standard, mini and jumbo receive rings.
154  */
155 
#define BGE_SSLOTS	256	/* buffers for the standard RX ring */
#define BGE_MSLOTS	256	/* buffers for the mini RX ring */
#define BGE_JSLOTS	384	/* buffers for the jumbo RX ring */

/* Raw jumbo buffer length: jumbo frame plus payload-alignment slop. */
#define BGE_JRAWLEN	(BGE_JUMBO_FRAMELEN + ETHER_ALIGN)
/* BGE_JRAWLEN rounded up to a multiple of sizeof(uint64_t). */
#define BGE_JLEN	(BGE_JRAWLEN + (sizeof(uint64_t) - 		      \
			    (BGE_JRAWLEN % sizeof(uint64_t))))
#define BGE_JPAGESZ 	PAGE_SIZE
/* Padding so the whole jumbo pool spans an integral number of pages. */
#define BGE_RESID 	(BGE_JPAGESZ - (BGE_JLEN * BGE_JSLOTS) % BGE_JPAGESZ)
/* Total size of the jumbo buffer pool. */
#define BGE_JMEM 	((BGE_JLEN * BGE_JSLOTS) + BGE_RESID)
166 
167 /*
168  * Ring structures. Most of these reside in host memory and we tell
169  * the NIC where they are via the ring control blocks. The exceptions
170  * are the tx and command rings, which live in NIC memory and which
171  * we access via the shared memory window.
172  */
/*
 * Host-resident descriptor rings and status block, plus pointers into
 * NIC shared memory for the rings that live on the card.  The layout
 * of this struct is what BGE_RING_DMA_ADDR() indexes into, so it is
 * mapped as a single DMA area (see bge_ring_map in bge_softc).
 */
struct bge_ring_data {
	struct bge_rx_bd	bge_rx_std_ring[BGE_STD_RX_RING_CNT];
	struct bge_rx_bd	bge_rx_jumbo_ring[BGE_JUMBO_RX_RING_CNT];
	struct bge_rx_bd	bge_rx_return_ring[BGE_RETURN_RING_CNT];
	struct bge_tx_bd	bge_tx_ring[BGE_TX_RING_CNT];
	struct bge_status_block	bge_status_block;
	struct bge_tx_desc	*bge_tx_ring_nic;/* pointer to shared mem */
	struct bge_cmd_desc	*bge_cmd_ring;	/* pointer to shared mem */
	struct bge_gib		bge_info;
};
183 
/*
 * Bus address of member 'offset' within the softc's bge_ring_data:
 * base address of the (single) DMA segment plus the member's offset.
 */
#define BGE_RING_DMA_ADDR(sc, offset)					      \
	((sc)->bge_ring_map->dm_segs[0].ds_addr +			      \
	offsetof(struct bge_ring_data, offset))
187 
188 /*
189  * Number of DMA segments in a TxCB. Note that this is carefully
190  * chosen to make the total struct size an even power of two. It's
191  * critical that no TxCB be split across a page boundary since
192  * no attempt is made to allocate physically contiguous memory.
193  *
194  */
#if 0	/* pre-TSO values */
#define BGE_TXDMA_MAX	ETHER_MAX_LEN_JUMBO
#ifdef _LP64
#define BGE_NTXSEG	30
#else
#define BGE_NTXSEG	31
#endif
#else	/* TSO values */
/* Largest TX DMA mapping: a full TSO payload (IP_MAXPACKET), page-rounded. */
#define BGE_TXDMA_MAX	(round_page(IP_MAXPACKET))	/* for TSO */
#ifdef _LP64
#define BGE_NTXSEG	120	/* XXX just a guess */
#else
#define BGE_NTXSEG	124	/* XXX just a guess */
#endif
#endif	/* TSO values */

/* Size in bytes of the host status block. */
#define	BGE_STATUS_BLK_SZ	sizeof (struct bge_status_block)
212 
213 /*
214  * Mbuf pointers. We need these to keep track of the virtual addresses
215  * of our mbuf chains since we can only convert from physical to virtual,
216  * not the other way around.
217  */
/*
 * Software shadow of the rings: the mbuf attached to each descriptor
 * slot plus the DMA maps used to load them, and the jumbo buffer pool
 * bookkeeping.
 */
struct bge_chain_data {
	struct mbuf		*bge_tx_chain[BGE_TX_RING_CNT];
	struct mbuf		*bge_rx_std_chain[BGE_STD_RX_RING_CNT];
	struct mbuf		*bge_rx_jumbo_chain[BGE_JUMBO_RX_RING_CNT];
	bus_dmamap_t		bge_rx_std_map[BGE_STD_RX_RING_CNT];
	bus_dmamap_t		bge_rx_jumbo_map;	/* maps the whole jumbo pool */
	bus_dma_segment_t	bge_rx_jumbo_seg;
	/* Stick the jumbo mem management stuff here too. */
	void *			bge_jslots[BGE_JSLOTS];	/* per-slot buffer pointers */
	void *			bge_jumbo_buf;		/* base of the jumbo pool */
};
229 
/*
 * Bus address of jumbo-pool mbuf 'm': the data's byte offset within
 * the pool added to the pool's DMA segment base address.
 */
#define BGE_JUMBO_DMA_ADDR(sc, m) \
	((sc)->bge_cdata.bge_rx_jumbo_map->dm_segs[0].ds_addr + \
	 (mtod((m), char *) - (char *)(sc)->bge_cdata.bge_jumbo_buf))
233 
/* Device-match table entry: PCI vendor/device ID pair and a name. */
struct bge_type {
	uint16_t		bge_vid;	/* PCI vendor ID */
	uint16_t		bge_did;	/* PCI device ID */
	char			*bge_name;	/* human-readable device name */
};

/* Generic poll-loop bound (iterations; NOTE(review): units vary by caller). */
#define BGE_TIMEOUT		100000
#define BGE_TXCONS_UNSET		0xFFFF	/* impossible value */
242 
/* List entry tracking one slot of the jumbo buffer pool. */
struct bge_jpool_entry {
	int				slot;	/* index into bge_jslots[] */
	SLIST_ENTRY(bge_jpool_entry)	jpool_entries;	/* free/in-use list linkage */
};
247 
/*
 * Register/value pair for table-driven workaround writes —
 * presumably applied to Broadcom PHYs; NOTE(review): confirm the
 * exact usage against if_bge.c.
 */
struct bge_bcom_hack {
	int			reg;
	int			val;
};
252 
/*
 * Pool entry holding a pair of TX DMA maps — presumably one created
 * from the native tag and one from the 32-bit tag (see bge_dmatag and
 * bge_dmatag32 in bge_softc); is_dma32 records which map holds the
 * currently loaded mbuf.
 */
struct txdmamap_pool_entry {
	bus_dmamap_t dmamap;
	bus_dmamap_t dmamap32;
	bool is_dma32;		/* true if dmamap32 is the active map */
	SLIST_ENTRY(txdmamap_pool_entry) link;	/* txdma_list linkage */
};
259 
/* Flag bits for bge_softc::bge_asf_mode (ASF management firmware). */
#define	ASF_ENABLE		1
#define	ASF_NEW_HANDSHAKE	2
#define	ASF_STACKUP		4
263 
/*
 * Per-device software state for the bge(4) driver.  Field comments
 * below are best-effort from this header alone; confirm details
 * against if_bge.c where marked.
 */
struct bge_softc {
	device_t		bge_dev;	/* autoconf device handle */
	struct ethercom		ethercom;	/* interface info */
	bus_space_handle_t	bge_bhandle;	/* device registers (CSR_* macros) */
	bus_space_tag_t		bge_btag;
	bus_size_t		bge_bsize;
	bus_space_handle_t	bge_apehandle;	/* BAR2 APE registers (APE_* macros) */
	bus_space_tag_t		bge_apetag;
	bus_size_t		bge_apesize;
	void			*bge_intrhand;	/* interrupt handler cookie */
	pci_intr_handle_t	*bge_pihp;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_pcitag;

	struct pci_attach_args	bge_pa;
	struct mii_data		bge_mii;	/* MII/PHY state */
	struct ifmedia		bge_ifmedia;	/* media info */
	uint32_t		bge_return_ring_cnt;	/* RX return ring size in use */
	uint32_t		bge_tx_prodidx;	/* TX producer index */
	bus_dma_tag_t		bge_dmatag;	/* primary DMA tag */
	bus_dma_tag_t		bge_dmatag32;	/* 32-bit-limited DMA tag */
	bool			bge_dma64;	/* 64-bit DMA available */
	uint32_t		bge_pcixcap;	/* PCI-X capability (presumed offset) */
	uint32_t		bge_pciecap;	/* PCIe capability (presumed offset) */
	uint16_t		bge_mps;	/* PCIe max payload size */
	int			bge_expmrq;	/* PCIe max read request size */
	uint32_t		bge_lasttag;	/* last status-block tag seen */
	uint32_t		bge_mfw_flags;  /* Management F/W flags */
#define	BGE_MFW_ON_RXCPU	__BIT(0)
#define	BGE_MFW_ON_APE		__BIT(1)
#define	BGE_MFW_TYPE_NCSI	__BIT(2)
#define	BGE_MFW_TYPE_DASH	__BIT(3)
	int			bge_phy_ape_lock;	/* APE lock used for PHY access */
	int			bge_phy_addr;	/* PHY address on the MII bus */
	uint32_t		bge_chipid;	/* chip ID/revision */
	uint8_t			bge_asf_mode;	/* ASF_* flags above */
	uint8_t			bge_asf_count;	/* ASF heartbeat countdown — confirm */
	struct bge_ring_data	*bge_rdata;	/* rings */
	struct bge_chain_data	bge_cdata;	/* mbufs */
	bus_dmamap_t		bge_ring_map;	/* DMA map covering *bge_rdata */
	bus_dma_segment_t	bge_ring_seg;
	int			bge_ring_rseg;
	uint16_t		bge_tx_saved_considx;	/* last seen TX consumer index */
	uint16_t		bge_rx_saved_considx;	/* last seen RX consumer index */
	uint16_t		bge_std;	/* current std ring head */
	uint16_t		bge_std_cnt;
	uint16_t		bge_jumbo;	/* current jumbo ring head */
	SLIST_HEAD(__bge_jfreehead, bge_jpool_entry)	bge_jfree_listhead;
	SLIST_HEAD(__bge_jinusehead, bge_jpool_entry)	bge_jinuse_listhead;
	/* Interrupt coalescing tunables. */
	uint32_t		bge_stat_ticks;
	uint32_t		bge_rx_coal_ticks;
	uint32_t		bge_tx_coal_ticks;
	uint32_t		bge_rx_max_coal_bds;
	uint32_t		bge_tx_max_coal_bds;
	uint32_t		bge_sts;	/* link state; bits below */
#define BGE_STS_LINK		__BIT(0)	/* MAC link status */
#define BGE_STS_LINK_EVT	__BIT(1)	/* pending link event */
#define BGE_STS_AUTOPOLL	__BIT(2)	/* PHY auto-polling  */
#define BGE_STS_BIT(sc, x)	((sc)->bge_sts & (x))
#define BGE_STS_SETBIT(sc, x)	((sc)->bge_sts |= (x))
#define BGE_STS_CLRBIT(sc, x)	((sc)->bge_sts &= ~(x))
	u_short			bge_if_flags;	/* cached if_flags */
	uint32_t		bge_flags;	/* chip/board feature flags */
	uint32_t		bge_phy_flags;	/* PHY quirk/feature flags */
	int			bge_flowflags;	/* flow-control configuration */
	time_t			bge_tx_lastsent;	/* for TX watchdog */
	bool			bge_stopping;
	bool			bge_txrx_stopping;
	bool			bge_tx_sending;

#ifdef BGE_EVENT_COUNTERS
	/*
	 * Event counters.
	 */
	struct evcnt bge_ev_intr;	/* interrupts */
	struct evcnt bge_ev_intr_spurious;  /* spurious intr. (tagged status)*/
	struct evcnt bge_ev_intr_spurious2; /* spurious interrupts */
	struct evcnt bge_ev_tx_xoff;	/* send PAUSE(len>0) packets */
	struct evcnt bge_ev_tx_xon;	/* send PAUSE(len=0) packets */
	struct evcnt bge_ev_rx_xoff;	/* receive PAUSE(len>0) packets */
	struct evcnt bge_ev_rx_xon;	/* receive PAUSE(len=0) packets */
	struct evcnt bge_ev_rx_macctl;	/* receive MAC control packets */
	struct evcnt bge_ev_xoffentered;/* XOFF state entered */
#endif /* BGE_EVENT_COUNTERS */
	uint64_t		bge_if_collisions;	/* collision statistic */
	int			bge_txcnt;	/* TX descriptors in use */
	struct callout		bge_timeout;	/* periodic tick callout */
	bool			bge_pending_rxintr_change;
	bool			bge_detaching;
	SLIST_HEAD(, txdmamap_pool_entry) txdma_list;	/* free TX map pool */
	struct txdmamap_pool_entry *txdma[BGE_TX_RING_CNT];	/* per-slot active map */

	struct sysctllog	*bge_log;

	krndsource_t	rnd_source;	/* random source */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_intr_lock;		/* lock for interrupt operations */
	struct workqueue *sc_reset_wq;	/* workqueue running sc_reset_work */
	struct work sc_reset_work;
	volatile unsigned sc_reset_pending;	/* nonzero while a reset is queued */

	bool sc_trigger_reset;
};
368 
369 #endif /* _DEV_PCI_IF_BGEVAR_H_ */
370