xref: /netbsd/sys/dev/ic/dwc_gmac_var.h (revision b6852db6)
1 /* $NetBSD: dwc_gmac_var.h,v 1.17 2022/09/18 18:26:53 thorpej Exp $ */
2 
3 /*-
4  * Copyright (c) 2013, 2014 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas of 3am Software Foundry and Martin Husemann.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifdef _KERNEL_OPT
33 #include "opt_net_mpsafe.h"
34 #endif
35 
/* Use DWCGMAC_MPSAFE inside the front-ends for interrupt handlers.  */
#ifdef NET_MPSAFE
#define DWCGMAC_MPSAFE	1
#endif

/*
 * FDT front-ends pass this when establishing their interrupt handler:
 * FDT_INTR_MPSAFE when the driver runs MP-safe, 0 otherwise.
 */
#ifdef DWCGMAC_MPSAFE
#define DWCGMAC_FDT_INTR_MPSAFE FDT_INTR_MPSAFE
#else
#define DWCGMAC_FDT_INTR_MPSAFE 0
#endif
46 
/*
 * We could use 1024 DMA descriptors to fill up an 8k page (each one is
 * 16 bytes).  However, on TX we probably will not need that many, and on
 * RX we allocate a full mbuf cluster for each, so secondary memory
 * consumption would grow rapidly.
 * So currently we waste half a page of DMA memory and consume 512 KByte
 * of RAM for mbuf clusters.
 * XXX Maybe fine-tune later, or reconsider unsharing of RX/TX dmamap.
 */
#define		AWGE_RX_RING_COUNT	256	/* RX DMA descriptors */
#define		AWGE_TX_RING_COUNT	256	/* TX DMA descriptors */
#define		AWGE_TOTAL_RING_COUNT	\
			(AWGE_RX_RING_COUNT + AWGE_TX_RING_COUNT)

/* Maximum frame length programmed into a single descriptor. */
#define		AWGE_MAX_PACKET		0x7ff
62 
63 struct dwc_gmac_dev_dmadesc;
64 
/*
 * Table of operations on a single hardware DMA descriptor.  The core
 * supports more than one descriptor layout, so the attachment code
 * installs the matching table in sc_descm (see struct dwc_gmac_softc).
 * All hooks take a pointer to one struct dwc_gmac_dev_dmadesc.
 */
struct dwc_gmac_desc_methods {
	/* Initialize a TX descriptor's flag word before use. */
	void (*tx_init_flags)(struct dwc_gmac_dev_dmadesc *);
	/* Hand the TX descriptor over to the device. */
	void (*tx_set_owned_by_dev)(struct dwc_gmac_dev_dmadesc *);
	/* Non-zero while the device still owns the TX descriptor. */
	int  (*tx_is_owned_by_dev)(struct dwc_gmac_dev_dmadesc *);
	/* Set the TX buffer length for this descriptor. */
	void (*tx_set_len)(struct dwc_gmac_dev_dmadesc *, int);
	/* Mark the descriptor as first/last fragment of a packet. */
	void (*tx_set_first_frag)(struct dwc_gmac_dev_dmadesc *);
	void (*tx_set_last_frag)(struct dwc_gmac_dev_dmadesc *);

	/* Initialize an RX descriptor's flag word before use. */
	void (*rx_init_flags)(struct dwc_gmac_dev_dmadesc *);
	/* Hand the RX descriptor over to the device. */
	void (*rx_set_owned_by_dev)(struct dwc_gmac_dev_dmadesc *);
	/* Non-zero while the device still owns the RX descriptor. */
	int  (*rx_is_owned_by_dev)(struct dwc_gmac_dev_dmadesc *);
	/* Set the RX buffer length for this descriptor. */
	void (*rx_set_len)(struct dwc_gmac_dev_dmadesc *, int);
	/* Get the length of the received frame from the descriptor. */
	uint32_t  (*rx_get_len)(struct dwc_gmac_dev_dmadesc *);
	/* Non-zero if the descriptor reports a receive error. */
	int  (*rx_has_error)(struct dwc_gmac_dev_dmadesc *);
};
80 
/* Software state for one RX ring slot. */
struct dwc_gmac_rx_data {
	bus_dmamap_t	rd_map;		/* DMA map for the receive buffer */
	struct mbuf	*rd_m;		/* mbuf (cluster) backing this slot */
};
85 
/* Software state for one TX ring slot. */
struct dwc_gmac_tx_data {
	bus_dmamap_t	td_map;		/* DMA map assigned to this slot */
	/* map currently loaded with td_m — may differ from td_map when
	 * maps are swapped between slots; see dwc_gmac.c for the exact
	 * protocol */
	bus_dmamap_t	td_active;
	struct mbuf	*td_m;		/* mbuf being transmitted, if any */
};
91 
/*
 * TX ring state.  The descriptors live in the DMA memory shared with
 * the RX ring (sc_dma_ring_map/sc_dma_ring_seg in the softc).
 */
struct dwc_gmac_tx_ring {
	bus_addr_t			t_physaddr; /* PA of TX ring start */
	struct dwc_gmac_dev_dmadesc	*t_desc;    /* VA of TX ring start */
	struct dwc_gmac_tx_data	t_data[AWGE_TX_RING_COUNT]; /* per-slot state */
	/* ring indices and count of queued slots — exact producer/consumer
	 * roles are defined by the users in dwc_gmac.c */
	int				t_cur, t_next, t_queued;
	kmutex_t			t_mtx;	/* protects this ring */
};
99 
/*
 * RX ring state.  The descriptors live in the DMA memory shared with
 * the TX ring (sc_dma_ring_map/sc_dma_ring_seg in the softc).
 */
struct dwc_gmac_rx_ring {
	bus_addr_t			r_physaddr; /* PA of RX ring start */
	struct dwc_gmac_dev_dmadesc	*r_desc;    /* VA of RX ring start */
	struct dwc_gmac_rx_data	r_data[AWGE_RX_RING_COUNT]; /* per-slot state */
	/* ring indices — exact roles are defined by the users in dwc_gmac.c */
	int				r_cur, r_next;
	kmutex_t			r_mtx;	/* protects this ring */
};
107 
/*
 * Software state for one DWC GMAC instance.  Allocated and partially
 * filled in by the bus-specific front-end, then completed by
 * dwc_gmac_attach().
 */
struct dwc_gmac_softc {
	device_t sc_dev;			/* generic device handle */
	bus_space_tag_t sc_bst;			/* register space tag */
	bus_space_handle_t sc_bsh;		/* register space handle */
	bus_dma_tag_t sc_dmat;			/* DMA tag for rings/buffers */
	uint32_t sc_flags;			/* DWC_GMAC_* flags below */
#define	DWC_GMAC_FORCE_THRESH_DMA_MODE	0x01	/* force DMA to use threshold mode */
	struct ethercom sc_ec;			/* Ethernet common state */
	struct mii_data sc_mii;			/* MII/PHY state */
	kmutex_t sc_mdio_lock;			/* serializes MDIO access */
	bus_dmamap_t sc_dma_ring_map;		/* common dma memory for RX */
	bus_dma_segment_t sc_dma_ring_seg;	/* and TX ring */
	struct dwc_gmac_rx_ring sc_rxq;		/* receive ring */
	struct dwc_gmac_tx_ring sc_txq;		/* transmit ring */
	const struct dwc_gmac_desc_methods *sc_descm; /* descriptor ops */
	u_short sc_if_flags;			/* shadow of ether flags */
	/* MII clock selection — passed in as mii_clk to dwc_gmac_attach();
	 * presumably a GMAC_MII clock-divider value, see dwc_gmac.c */
	uint16_t sc_mii_clk;
	bool sc_txbusy;				/* TX path busy; see dwc_gmac.c */
	bool sc_stopping;			/* interface is being stopped */
	krndsource_t rnd_source;		/* entropy source */
	kmutex_t *sc_lock;			/* lock for softc operations */

	struct if_percpuq *sc_ipq;		/* softint-based input queues */

	/* Front-end hook to program the MAC/PHY link speed. */
	void (*sc_set_speed)(struct dwc_gmac_softc *, int);
};
134 
/*
 * Attach the core driver.  Called by the bus front-end after it has
 * mapped the registers and filled in the bus-specific softc members.
 * phy_id selects the PHY address, mii_clk the MII clock selection
 * (stored in sc_mii_clk).  Presumably returns 0 on success — confirm
 * against dwc_gmac.c.
 */
int dwc_gmac_attach(struct dwc_gmac_softc*, int /*phy_id*/,
    uint32_t /*mii_clk*/);

/* Core interrupt handler, called from the front-end's handler. */
int dwc_gmac_intr(struct dwc_gmac_softc*);
138