xref: /freebsd/sys/dev/gve/gve_utils.c (revision 54dfc97b)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2023 Google LLC
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "gve.h"

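/*
 * Helpers for 4-byte accesses to the register and doorbell BARs. The
 * htobe32()/be32toh() conversions on every access reflect that the device
 * interprets these 32-bit values as big-endian.
 */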
uint32_t
gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset)
{
	return (be32toh(bus_read_4(priv->reg_bar, offset)));
}

void
gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->reg_bar, offset, htobe32(val));
}

void
gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
{
	bus_write_4(priv->db_bar, offset, htobe32(val));
}

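/*
 * Allocate/free an array of counter(9) per-CPU counters. Because the
 * allocation uses M_WAITOK, counter_u64_alloc() sleeps until it succeeds
 * and never returns NULL, so the loop needs no error handling.
 *
 * A minimal usage sketch (the "stats" array is hypothetical, not part of
 * this driver):
 *
 *	counter_u64_t stats[4];
 *
 *	gve_alloc_counters(stats, nitems(stats));
 *	counter_u64_add(stats[0], 1);
 *	...
 *	gve_free_counters(stats, nitems(stats));
 */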
void
gve_alloc_counters(counter_u64_t *stat, int num_stats)
{
	int i;

	for (i = 0; i < num_stats; i++)
		stat[i] = counter_u64_alloc(M_WAITOK);
}

void
gve_free_counters(counter_u64_t *stat, int num_stats)
{
	int i;

	for (i = 0; i < num_stats; i++)
		counter_u64_free(stat[i]);
}

/* Currently assumes a single segment. */
static void
gve_dmamap_load_callback(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	if (error == 0)
		*(bus_addr_t *) arg = segs[0].ds_addr;
}
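
/*
 * Note: on failure the callback leaves *arg untouched, which is why the
 * callers below pre-load the destination with the sentinel (bus_addr_t)-1
 * and check for it after bus_dmamap_load() returns.
 */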
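/*
 * Allocate "size" bytes of zeroed, wired, bus-coherent memory and load it
 * into a single-segment DMA map. On success both dma->cpu_addr and
 * dma->bus_addr are valid; on failure everything is torn down and
 * dma->tag is left NULL. Pair with gve_dma_free_coherent().
 *
 * A minimal usage sketch (the sizes and the "handle" variable are
 * hypothetical, not taken from this driver):
 *
 *	struct gve_dma_handle handle;
 *
 *	if (gve_dma_alloc_coherent(priv, PAGE_SIZE, PAGE_SIZE, &handle) != 0)
 *		return (ENOMEM);
 *	... CPU uses handle.cpu_addr, device is given handle.bus_addr ...
 *	gve_dma_free_coherent(&handle);
 */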
int
gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma)
{
	int err;
	device_t dev = priv->dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->tag);
	if (err != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto clear_tag;
	}

	err = bus_dmamem_alloc(dma->tag, (void **) &dma->cpu_addr,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &dma->map);
	if (err != 0) {
		device_printf(dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto destroy_tag;
	}

	/* An address set by the callback will never be -1 */
	dma->bus_addr = (bus_addr_t)-1;
	err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
	    gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_NOWAIT);
	if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
		device_printf(dev, "%s: bus_dmamap_load failed: %d\n", __func__, err);
		goto free_mem;
	}

	return (0);

free_mem:
	bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
destroy_tag:
	bus_dma_tag_destroy(dma->tag);
clear_tag:
	dma->tag = NULL;

	return (err);
}

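/*
 * Undo gve_dma_alloc_coherent(): sync both directions, unload the map,
 * free the DMA memory, and destroy the tag.
 */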
void
gve_dma_free_coherent(struct gve_dma_handle *dma)
{
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamem_free(dma->tag, dma->cpu_addr, dma->map);
	bus_dma_tag_destroy(dma->tag);
}

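/*
 * Like gve_dma_alloc_coherent(), but for memory the caller has already
 * allocated: dma->cpu_addr must point at "size" bytes before this is
 * called. Only the tag and map are created here, so the error paths never
 * free the underlying memory. Pair with gve_dmamap_destroy().
 */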
int
gve_dmamap_create(struct gve_priv *priv, int size, int align,
    struct gve_dma_handle *dma)
{
	int err;
	device_t dev = priv->dev;

	err = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    align, 0,			/* alignment, bounds */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockarg */
	    &dma->tag);
	if (err != 0) {
		device_printf(dev, "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto clear_tag;
	}

	err = bus_dmamap_create(dma->tag, BUS_DMA_COHERENT, &dma->map);
	if (err != 0) {
		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
		    __func__, err);
		goto destroy_tag;
	}

	/* An address set by the callback will never be -1 */
	dma->bus_addr = (bus_addr_t)-1;
	err = bus_dmamap_load(dma->tag, dma->map, dma->cpu_addr, size,
	    gve_dmamap_load_callback, &dma->bus_addr, BUS_DMA_WAITOK);
	if (err != 0 || dma->bus_addr == (bus_addr_t)-1) {
		device_printf(dev, "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto destroy_map;
	}

	return (0);

destroy_map:
	bus_dmamap_destroy(dma->tag, dma->map);
destroy_tag:
	bus_dma_tag_destroy(dma->tag);
clear_tag:
	dma->tag = NULL;

	return (err);
}

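/*
 * Undo gve_dmamap_create(). The memory that was mapped belongs to the
 * caller and is not freed here.
 */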
void
gve_dmamap_destroy(struct gve_dma_handle *dma)
{
	bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(dma->tag, dma->map);
	bus_dmamap_destroy(dma->tag, dma->map);
	bus_dma_tag_destroy(dma->tag);
}

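/*
 * Interrupt filter for the management vector. Filters run in primary
 * interrupt context, so all real work is deferred to the service
 * taskqueue.
 */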
static int
gve_mgmnt_intr(void *arg)
{
	struct gve_priv *priv = arg;

	taskqueue_enqueue(priv->service_tq, &priv->service_task);
	return (FILTER_HANDLED);
}

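/*
 * Tear down and release every allocated vector, then release the MSI-X
 * allocation itself. Entries with a NULL resource are skipped, so this is
 * safe to call on a partially constructed table, which is how
 * gve_alloc_irqs() uses it on its error paths.
 */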
void
gve_free_irqs(struct gve_priv *priv)
{
	struct gve_irq *irq;
	int num_irqs;
	int rid;
	int rc;
	int i;

	if (priv->irq_tbl == NULL) {
		device_printf(priv->dev, "No irq table, nothing to free\n");
		return;
	}

	num_irqs = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues + 1;

	for (i = 0; i < num_irqs; i++) {
		irq = &priv->irq_tbl[i];
		if (irq->res == NULL)
			continue;

		rid = rman_get_rid(irq->res);

		rc = bus_teardown_intr(priv->dev, irq->res, irq->cookie);
		if (rc != 0)
			device_printf(priv->dev, "Failed to tear down irq num %d\n",
			    rid);

		rc = bus_release_resource(priv->dev, SYS_RES_IRQ,
		    rid, irq->res);
		if (rc != 0)
			device_printf(priv->dev, "Failed to release irq num %d\n",
			    rid);

		irq->res = NULL;
		irq->cookie = NULL;
	}

	free(priv->irq_tbl, M_GVE);
	priv->irq_tbl = NULL;

	/* Safe to call even if MSI-X was never allocated */
	pci_release_msi(priv->dev);
}

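/*
 * Allocate one MSI-X vector per Tx queue, one per Rx queue, and one for
 * the management interrupt. Table layout (rids are 1-based, indices
 * 0-based):
 *
 *	index [0, num_tx)                Tx queue interrupts
 *	index [num_tx, num_tx + num_rx)  Rx queue interrupts
 *	index num_tx + num_rx            management interrupt
 *
 * Each queue's com.ntfy_id is set to its table index.
 */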
int
gve_alloc_irqs(struct gve_priv *priv)
{
	int num_tx = priv->tx_cfg.num_queues;
	int num_rx = priv->rx_cfg.num_queues;
	int req_nvecs = num_tx + num_rx + 1;
	int got_nvecs = req_nvecs;
	struct gve_irq *irq;
	int i, j, m;
	int rid;
	int err;

	struct gve_ring_com *com;
	struct gve_rx_ring *rx;
	struct gve_tx_ring *tx;

	if (pci_alloc_msix(priv->dev, &got_nvecs) != 0) {
		device_printf(priv->dev, "Failed to acquire any msix vectors\n");
		err = ENXIO;
		goto abort;
	} else if (got_nvecs != req_nvecs) {
		device_printf(priv->dev, "Tried to acquire %d msix vectors, got only %d\n",
		    req_nvecs, got_nvecs);
		err = ENOSPC;
		goto abort;
	}

	if (bootverbose)
		device_printf(priv->dev, "Enabled MSIX with %d vectors\n", got_nvecs);

	priv->irq_tbl = malloc(sizeof(struct gve_irq) * req_nvecs, M_GVE,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < num_tx; i++) {
		irq = &priv->irq_tbl[i];
		tx = &priv->tx[i];
		com = &tx->com;
		rid = i + 1;

		irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (irq->res == NULL) {
			device_printf(priv->dev, "Failed to alloc irq %d for Tx queue %d\n",
			    rid, i);
			err = ENOMEM;
			goto abort;
		}

		err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
		    gve_tx_intr, NULL, &priv->tx[i], &irq->cookie);
		if (err != 0) {
			device_printf(priv->dev, "Failed to setup irq %d for Tx queue %d, "
			    "err: %d\n", rid, i, err);
			goto abort;
		}

		bus_describe_intr(priv->dev, irq->res, irq->cookie, "tx%d", i);
		com->ntfy_id = i;
	}

	for (j = 0; j < num_rx; j++) {
		irq = &priv->irq_tbl[i + j];
		rx = &priv->rx[j];
		com = &rx->com;
		rid = i + j + 1;

		irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE);
		if (irq->res == NULL) {
			device_printf(priv->dev,
			    "Failed to alloc irq %d for Rx queue %d\n", rid, j);
			err = ENOMEM;
			goto abort;
		}

		err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
		    gve_rx_intr, NULL, &priv->rx[j], &irq->cookie);
		if (err != 0) {
			device_printf(priv->dev, "Failed to setup irq %d for Rx queue %d, "
			    "err: %d\n", rid, j, err);
			goto abort;
		}

		bus_describe_intr(priv->dev, irq->res, irq->cookie, "rx%d", j);
		com->ntfy_id = i + j;
	}

	m = i + j;
	rid = m + 1;
	irq = &priv->irq_tbl[m];

	irq->res = bus_alloc_resource_any(priv->dev, SYS_RES_IRQ,
	    &rid, RF_ACTIVE);
	if (irq->res == NULL) {
		device_printf(priv->dev, "Failed to allocate irq %d for mgmnt queue\n", rid);
		err = ENOMEM;
		goto abort;
	}

	err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
	    gve_mgmnt_intr, NULL, priv, &irq->cookie);
	if (err != 0) {
		device_printf(priv->dev, "Failed to setup irq %d for mgmnt queue, err: %d\n",
		    rid, err);
		goto abort;
	}

	bus_describe_intr(priv->dev, irq->res, irq->cookie, "mgmnt");

	return (0);

abort:
	gve_free_irqs(priv);
	return (err);
}

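/*
 * Queue interrupts are masked and unmasked through the queue's irq
 * doorbell: writing GVE_IRQ_MASK masks the vector, writing 0 unmasks it.
 */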
void
gve_unmask_all_queue_irqs(struct gve_priv *priv)
{
	struct gve_tx_ring *tx;
	struct gve_rx_ring *rx;
	int idx;

	for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		tx = &priv->tx[idx];
		gve_db_bar_write_4(priv, tx->com.irq_db_offset, 0);
	}
	for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		rx = &priv->rx[idx];
		gve_db_bar_write_4(priv, rx->com.irq_db_offset, 0);
	}
}

void
gve_mask_all_queue_irqs(struct gve_priv *priv)
{
	for (int idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
		struct gve_tx_ring *tx = &priv->tx[idx];
		gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK);
	}
	for (int idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
		struct gve_rx_ring *rx = &priv->rx[idx];
		gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
	}
}