/*	$NetBSD: mvxpbm.c,v 1.1 2015/06/03 03:55:47 hsuenaga Exp $	*/
/*
 * Copyright (c) 2015 Internet Initiative Japan Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mvxpbm.c,v 1.1 2015/06/03 03:55:47 hsuenaga Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/mbuf.h>

#include <dev/marvell/marvellreg.h>
#include <dev/marvell/marvellvar.h>

#include "mvxpbmvar.h"

#ifdef DEBUG
#define STATIC /* nothing */
#define DPRINTF(fmt, ...) \
	do { \
		if (mvxpbm_debug >= 1) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTFN(level, fmt, ...) \
	do { \
		if (mvxpbm_debug >= (level)) { \
			printf("%s: ", __func__); \
			printf((fmt), ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTDEV(dev, level, fmt, ...) \
	do { \
		if (mvxpbm_debug >= (level)) { \
			device_printf((dev), \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#define DPRINTSC(sc, level, fmt, ...) \
	do { \
		device_t dev = (sc)->sc_dev; \
		if (mvxpbm_debug >= (level)) { \
			device_printf(dev, \
			    "%s: " fmt, __func__, ##__VA_ARGS__); \
		} \
	} while (/*CONSTCOND*/0)
#else
#define STATIC static
#define DPRINTF(fmt, ...)
#define DPRINTFN(level, fmt, ...)
#define DPRINTDEV(dev, level, fmt, ...)
#define DPRINTSC(sc, level, fmt, ...)
#endif

/* autoconf(9) */
STATIC int mvxpbm_match(device_t, cfdata_t, void *);
STATIC void mvxpbm_attach(device_t, device_t, void *);
STATIC int mvxpbm_evcnt_attach(struct mvxpbm_softc *);
CFATTACH_DECL_NEW(mvxpbm_mbus, sizeof(struct mvxpbm_softc),
    mvxpbm_match, mvxpbm_attach, NULL, NULL);

/* DMA buffers */
STATIC int mvxpbm_alloc_buffer(struct mvxpbm_softc *);

/* mbuf subroutines */
STATIC void mvxpbm_free_mbuf(struct mbuf *, void *, size_t, void *);

/* singleton device instance */
static struct mvxpbm_softc sc_emul;
static struct mvxpbm_softc *sc0;

/* debug level */
#ifdef DEBUG
static int mvxpbm_debug = 0;
#endif

/*
 * autoconf(9)
 */
STATIC int
mvxpbm_match(device_t parent, cfdata_t match, void *aux)
{
	struct marvell_attach_args *mva = aux;

	if (strcmp(mva->mva_name, match->cf_name) != 0)
		return 0;
	if (mva->mva_unit > MVXPBM_UNIT_MAX)
		return 0;
	if (sc0 != NULL)
		return 0;
	if (mva->mva_offset != MVA_OFFSET_DEFAULT) {
		/* Hardware BM is not supported yet. */
		return 0;
	}

	return 1;
}

STATIC void
mvxpbm_attach(device_t parent, device_t self, void *aux)
{
	struct marvell_attach_args *mva = aux;
	struct mvxpbm_softc *sc = device_private(self);

	aprint_naive("\n");
	aprint_normal(": Marvell ARMADA Buffer Manager\n");
	memset(sc, 0, sizeof(*sc));
	sc->sc_dev = self;
	sc->sc_iot = mva->mva_iot;
	sc->sc_dmat = mva->mva_dmat;

	if (mva->mva_offset == MVA_OFFSET_DEFAULT) {
		aprint_normal_dev(sc->sc_dev, "Software emulation.\n");
		sc->sc_emul = 1;
	}

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&sc->sc_free);
	LIST_INIT(&sc->sc_inuse);

	/* DMA buffers */
	if (mvxpbm_alloc_buffer(sc) != 0)
		return;

	/* event counters */
	mvxpbm_evcnt_attach(sc);

	sc0 = sc;
}

/* attach event counters; currently a stub that registers nothing */
STATIC int
mvxpbm_evcnt_attach(struct mvxpbm_softc *sc)
{
	return 0;
}

/*
 * DMA buffers
 */
STATIC int
mvxpbm_alloc_buffer(struct mvxpbm_softc *sc)
{
	bus_dma_segment_t segs;
	char *kva, *ptr, *ptr_next, *ptr_data;
	char *bm_buf_end;
	uint32_t align, pad;
	int nsegs;
	int error;

	/*
	 * set default buffer sizes. these will be changed to satisfy
	 * alignment restrictions.
	 */
	sc->sc_chunk_count = 0;
	sc->sc_chunk_size = MVXPBM_PACKET_SIZE;
	sc->sc_chunk_header_size = sizeof(struct mvxpbm_chunk);
	sc->sc_chunk_packet_offset = 64;

	/*
	 * adjust bm_chunk_size, bm_chunk_header_size, bm_slotsize
	 * to satisfy alignment restrictions.
	 *
	 *    <----------------  bm_slotsize [oct.] ------------------>
	 *                               <--- bm_chunk_size[oct.] ---->
	 *    <-- header_size[oct.] ---> <- MVXPBM_PACKET_SIZE[oct.] ->
	 *   +-----------------+--------+---------+-----------------+--+
	 *   | bm_chunk hdr    |pad     |pkt_off  |   packet data   |  |
	 *   +-----------------+--------+---------+-----------------+--+
	 *   ^                          ^         ^                    ^
	 *   |                          |         |                    |
	 *   ptr                 ptr_data     DMA here          ptr_next
	 *
	 * Restrictions:
	 *   - total buffer size must be multiple of MVXPBM_BUF_ALIGN
	 *   - ptr must be aligned to MVXPBM_CHUNK_ALIGN
	 *   - ptr_data must be aligned to MVXPBM_DATA_ALIGN
	 *   - bm_chunk_size must be multiple of 8[bytes].
	 */
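	/*
	 * Worked example (illustrative figures only; the real constants
	 * are defined in mvxpbmvar.h): if sizeof(struct mvxpbm_chunk)
	 * were 44 bytes and MVXPBM_DATA_ALIGN were 32, then
	 * 44 & (32 - 1) == 12, so 20 bytes of pad grow the header to
	 * 64 bytes and packet data starts on a 32-byte boundary.
	 */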
	/* start calculation from 0x0000.0000 */
	ptr = (char *)0;

	/* align start of packet data */
	ptr_data = ptr + sc->sc_chunk_header_size;
	align = (unsigned long)ptr_data & MVXPBM_DATA_MASK;
	if (align != 0) {
		pad = MVXPBM_DATA_ALIGN - align;
		sc->sc_chunk_header_size += pad;
		DPRINTSC(sc, 1, "added padding to BM header, %u bytes\n", pad);
	}

	/* align size of packet data */
	ptr_data = ptr + sc->sc_chunk_header_size;
	ptr_next = ptr_data + MVXPBM_PACKET_SIZE;
	align = (unsigned long)ptr_next & MVXPBM_CHUNK_MASK;
	if (align != 0) {
		pad = MVXPBM_CHUNK_ALIGN - align;
		ptr_next += pad;
		DPRINTSC(sc, 1, "added padding to BM pktbuf, %u bytes\n", pad);
	}
	sc->sc_slotsize = ptr_next - ptr;
	sc->sc_chunk_size = ptr_next - ptr_data;
	KASSERT((sc->sc_chunk_size % MVXPBM_DATA_UNIT) == 0);

	/* align total buffer size to Mbus window boundary */
	sc->sc_buf_size = sc->sc_slotsize * MVXPBM_NUM_SLOTS;
	align = (unsigned long)sc->sc_buf_size & MVXPBM_BUF_MASK;
	if (align != 0) {
		pad = MVXPBM_BUF_ALIGN - align;
		sc->sc_buf_size += pad;
		DPRINTSC(sc, 1,
		    "expand buffer to fit page boundary, %u bytes\n", pad);
	}

	/*
	 * get the aligned buffer from busdma(9) framework
	 */
	if (bus_dmamem_alloc(sc->sc_dmat, sc->sc_buf_size, MVXPBM_BUF_ALIGN, 0,
	    &segs, 1, &nsegs, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't alloc BM buffers\n");
		return ENOBUFS;
	}
	if (bus_dmamem_map(sc->sc_dmat, &segs, nsegs, sc->sc_buf_size,
	    (void **)&kva, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev,
		    "can't map dma buffers (%zu bytes)\n", sc->sc_buf_size);
		error = ENOBUFS;
		goto fail1;
	}
	if (bus_dmamap_create(sc->sc_dmat, sc->sc_buf_size, 1, sc->sc_buf_size,
	    0, BUS_DMA_NOWAIT, &sc->sc_buf_map)) {
		aprint_error_dev(sc->sc_dev, "can't create dma map\n");
		error = ENOBUFS;
		goto fail2;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_buf_map,
	    kva, sc->sc_buf_size, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "can't load dma map\n");
		error = ENOBUFS;
		goto fail3;
	}
	sc->sc_buf = (void *)kva;
	sc->sc_buf_pa = segs.ds_addr;
	bm_buf_end = (void *)(kva + sc->sc_buf_size);
	DPRINTSC(sc, 1, "memory pool at %p\n", sc->sc_buf);

	/* slice the buffer */
	mvxpbm_lock(sc);
	for (ptr = sc->sc_buf; ptr + sc->sc_slotsize <= bm_buf_end;
	    ptr += sc->sc_slotsize) {
		struct mvxpbm_chunk *chunk;

		/* initialize chunk */
		ptr_data = ptr + sc->sc_chunk_header_size;
		chunk = (struct mvxpbm_chunk *)ptr;
		chunk->m = NULL;
		chunk->sc = sc;
		chunk->off = (ptr - sc->sc_buf);
		chunk->pa = (paddr_t)(sc->sc_buf_pa + chunk->off);
		chunk->buf_off = (ptr_data - sc->sc_buf);
		chunk->buf_pa = (paddr_t)(sc->sc_buf_pa + chunk->buf_off);
		chunk->buf_va = (vaddr_t)(sc->sc_buf + chunk->buf_off);
		chunk->buf_size = sc->sc_chunk_size;

		/* add to free list (for software management) */
		LIST_INSERT_HEAD(&sc->sc_free, chunk, link);
		mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
		sc->sc_chunk_count++;

		DPRINTSC(sc, 9, "new chunk %p\n", (void *)chunk->buf_va);
	}
	mvxpbm_unlock(sc);
	return 0;

fail3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_buf_map);
fail2:
	bus_dmamem_unmap(sc->sc_dmat, kva, sc->sc_buf_size);
fail1:
	bus_dmamem_free(sc->sc_dmat, &segs, nsegs);

	return error;
}
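
/*
 * Resulting pool layout (a sketch, grounded in the slicing loop above):
 * the pool is one physically contiguous DMA segment carved into
 * fixed-size slots, so for slot N:
 *
 *	chunk = (struct mvxpbm_chunk *)(sc->sc_buf + N * sc->sc_slotsize);
 *	data  = sc->sc_buf + N * sc->sc_slotsize + sc->sc_chunk_header_size;
 *
 * Each chunk header records both the kernel virtual and physical
 * addresses of its data area, and all chunks share the single
 * sc_buf_map, so no per-packet bus_dmamap_load() is needed.
 */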

/*
 * mbuf subroutines
 */
STATIC void
mvxpbm_free_mbuf(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct mvxpbm_chunk *chunk = (struct mvxpbm_chunk *)arg;
	int s;

	KASSERT(m != NULL);
	KASSERT(arg != NULL);

	DPRINTFN(3, "free packet %p\n", m);
	if (m->m_flags & M_PKTHDR)
		m_tag_delete_chain(m, NULL);
	chunk->m = NULL;
	/* free only the mbuf header; the external storage is our chunk */
	s = splvm();
	pool_cache_put(mb_cache, m);
	splx(s);
	/* return the chunk to the free list for reuse */
	mvxpbm_free_chunk(chunk);
}

/*
 * Exported APIs
 */
/* get mvxpbm device context */
struct mvxpbm_softc *
mvxpbm_device(struct marvell_attach_args *mva)
{
	struct mvxpbm_softc *sc;

	if (sc0 != NULL)
		return sc0;
	if (mva == NULL)
		return NULL;

	/* allocate software emulation context */
	sc = &sc_emul;
	memset(sc, 0, sizeof(*sc));
	sc->sc_emul = 1;
	sc->sc_iot = mva->mva_iot;
	sc->sc_dmat = mva->mva_dmat;

	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
	LIST_INIT(&sc->sc_free);
	LIST_INIT(&sc->sc_inuse);

	if (mvxpbm_alloc_buffer(sc) != 0)
		return NULL;
	mvxpbm_evcnt_attach(sc);
	sc0 = sc;
	return sc0;
}

/* allocate new memory chunk */
struct mvxpbm_chunk *
mvxpbm_alloc(struct mvxpbm_softc *sc)
{
	struct mvxpbm_chunk *chunk;

	mvxpbm_lock(sc);

	chunk = LIST_FIRST(&sc->sc_free);
	if (chunk == NULL) {
		mvxpbm_unlock(sc);
		return NULL;
	}

	LIST_REMOVE(chunk, link);
	LIST_INSERT_HEAD(&sc->sc_inuse, chunk, link);

	mvxpbm_unlock(sc);
	return chunk;
}

/* free memory chunk */
void
mvxpbm_free_chunk(struct mvxpbm_chunk *chunk)
{
	struct mvxpbm_softc *sc = chunk->sc;

	KASSERT(chunk->m == NULL);
	DPRINTFN(3, "bm chunk free\n");

	mvxpbm_lock(sc);

	LIST_REMOVE(chunk, link);
	LIST_INSERT_HEAD(&sc->sc_free, chunk, link);

	mvxpbm_unlock(sc);
}

/* prepare mbuf header after Rx */
int
mvxpbm_init_mbuf_hdr(struct mvxpbm_chunk *chunk)
{
	struct mvxpbm_softc *sc = chunk->sc;

	KASSERT(chunk->m == NULL);

	/* add new mbuf header */
	MGETHDR(chunk->m, M_DONTWAIT, MT_DATA);
	if (chunk->m == NULL) {
		aprint_error_dev(sc->sc_dev, "cannot get mbuf\n");
		return ENOBUFS;
	}
	MEXTADD(chunk->m, chunk->buf_va, chunk->buf_size, 0,
	    mvxpbm_free_mbuf, chunk);
	chunk->m->m_flags |= M_EXT_RW;
	chunk->m->m_len = chunk->m->m_pkthdr.len = chunk->buf_size;
	if (sc->sc_chunk_packet_offset)
		m_adj(chunk->m, sc->sc_chunk_packet_offset);

	return 0;
}
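
/*
 * Typical Rx life cycle of a chunk as seen by a consumer driver
 * (an illustrative sketch; the surrounding driver code is assumed,
 * not part of this file):
 *
 *	chunk = mvxpbm_alloc(sc);		// take a free chunk
 *	... give chunk->buf_pa to the NIC; DMA fills the buffer ...
 *	mvxpbm_dmamap_sync(chunk, len, BUS_DMASYNC_POSTREAD);
 *	mvxpbm_init_mbuf_hdr(chunk);		// wrap buffer in an mbuf
 *	... pass chunk->m up the network stack ...
 *
 * When the stack eventually calls m_freem(), the MEXTADD() callback
 * mvxpbm_free_mbuf() runs and the chunk returns to the free list.
 */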

/* sync DMA segments */
void
mvxpbm_dmamap_sync(struct mvxpbm_chunk *chunk, size_t size, int ops)
{
	struct mvxpbm_softc *sc = chunk->sc;

	KASSERT(size <= chunk->buf_size);
	if (size == 0)
		size = chunk->buf_size;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_map, chunk->buf_off, size, ops);
}
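
/*
 * Passing BM_SYNC_ALL as the size syncs the whole data area (the
 * size == 0 case above).  For example (illustrative), before handing
 * a recycled buffer back to the hardware:
 *
 *	mvxpbm_dmamap_sync(chunk, BM_SYNC_ALL, BUS_DMASYNC_PREREAD);
 */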

/* lock */
void
mvxpbm_lock(struct mvxpbm_softc *sc)
{
	mutex_enter(&sc->sc_mtx);
}

void
mvxpbm_unlock(struct mvxpbm_softc *sc)
{
	mutex_exit(&sc->sc_mtx);
}

/* get params */
const char *
mvxpbm_xname(struct mvxpbm_softc *sc)
{
	if (sc->sc_emul) {
		return "software_bm";
	}
	return device_xname(sc->sc_dev);
}

size_t
mvxpbm_chunk_size(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_size;
}

uint32_t
mvxpbm_chunk_count(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_count;
}

off_t
mvxpbm_packet_offset(struct mvxpbm_softc *sc)
{
	return sc->sc_chunk_packet_offset;
}

paddr_t
mvxpbm_buf_pbase(struct mvxpbm_softc *sc)
{
	return sc->sc_buf_pa;
}

size_t
mvxpbm_buf_size(struct mvxpbm_softc *sc)
{
	return sc->sc_buf_size;
}