xref: /freebsd/sys/arm/arm/pl310.c (revision d0b2dbfa)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2012 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright (c) 2011
 *	Ben Gray <ben.r.gray@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/intr.h>

#include <machine/bus.h>
#include <machine/pl310.h>
#ifdef PLATFORM
#include <machine/platformvar.h>
#endif

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#ifdef PLATFORM
#include "platform_pl310_if.h"
#endif

/*
 * Define this if you need to disable the PL310 for debugging purposes.
 * Spec:
 * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0246e/DDI0246E_l2c310_r3p1_trm.pdf
 */

/*
 * Hardcode errata for now:
 * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246b/pr01s02s02.html
 */
#define	PL310_ERRATA_588369
#define	PL310_ERRATA_753970
#define	PL310_ERRATA_727915

#define	PL310_LOCK(sc) do {		\
	mtx_lock_spin(&(sc)->sc_mtx);	\
} while (0)

#define	PL310_UNLOCK(sc) do {		\
	mtx_unlock_spin(&(sc)->sc_mtx);	\
} while (0)

static int pl310_enabled = 1;
TUNABLE_INT("hw.pl310.enabled", &pl310_enabled);

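/*
 * L2 cache geometry.  The line size is fixed at 32 bytes; the way size,
 * associativity, way mask and total cache size are derived from the
 * Auxiliary Control register in pl310_set_way_sizes().
 */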
static uint32_t g_l2cache_way_mask;

static const uint32_t g_l2cache_line_size = 32;
static const uint32_t g_l2cache_align_mask = (32 - 1);

static uint32_t g_l2cache_size;
static uint32_t g_way_size;
static uint32_t g_ways_assoc;

static struct pl310_softc *pl310_softc;

static struct ofw_compat_data compat_data[] = {
	{"arm,pl310",		true}, /* Non-standard, FreeBSD. */
	{"arm,pl310-cache",	true},
	{NULL,			false}
};

#ifdef PLATFORM
static void
platform_pl310_init(struct pl310_softc *sc)
{

	PLATFORM_PL310_INIT(platform_obj(), sc);
}

static void
platform_pl310_write_ctrl(struct pl310_softc *sc, uint32_t val)
{

	PLATFORM_PL310_WRITE_CTRL(platform_obj(), sc, val);
}

static void
platform_pl310_write_debug(struct pl310_softc *sc, uint32_t val)
{

	PLATFORM_PL310_WRITE_DEBUG(platform_obj(), sc, val);
}
#endif

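/*
 * Decode and print the Auxiliary Control and Prefetch Control register
 * settings; called from pl310_attach() when booting verbose.
 */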
static void
pl310_print_config(struct pl310_softc *sc)
{
	uint32_t aux, prefetch;
	const char *dis = "disabled";
	const char *ena = "enabled";

	aux = pl310_read4(sc, PL310_AUX_CTRL);
	prefetch = pl310_read4(sc, PL310_PREFETCH_CTRL);

	device_printf(sc->sc_dev, "Early BRESP response: %s\n",
		(aux & AUX_CTRL_EARLY_BRESP) ? ena : dis);
	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
		(aux & AUX_CTRL_INSTR_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Data prefetch: %s\n",
		(aux & AUX_CTRL_DATA_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Non-secure interrupt control: %s\n",
		(aux & AUX_CTRL_NS_INT_CTRL) ? ena : dis);
	device_printf(sc->sc_dev, "Non-secure lockdown: %s\n",
		(aux & AUX_CTRL_NS_LOCKDOWN) ? ena : dis);
	device_printf(sc->sc_dev, "Share override: %s\n",
		(aux & AUX_CTRL_SHARE_OVERRIDE) ? ena : dis);

	device_printf(sc->sc_dev, "Double linefill: %s\n",
		(prefetch & PREFETCH_CTRL_DL) ? ena : dis);
	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
		(prefetch & PREFETCH_CTRL_INSTR_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Data prefetch: %s\n",
		(prefetch & PREFETCH_CTRL_DATA_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Double linefill on WRAP request: %s\n",
		(prefetch & PREFETCH_CTRL_DL_ON_WRAP) ? ena : dis);
	device_printf(sc->sc_dev, "Prefetch drop: %s\n",
		(prefetch & PREFETCH_CTRL_PREFETCH_DROP) ? ena : dis);
	device_printf(sc->sc_dev, "Incr double Linefill: %s\n",
		(prefetch & PREFETCH_CTRL_INCR_DL) ? ena : dis);
	device_printf(sc->sc_dev, "Not same ID on exclusive sequence: %s\n",
		(prefetch & PREFETCH_CTRL_NOTSAMEID) ? ena : dis);
	device_printf(sc->sc_dev, "Prefetch offset: %d\n",
		(prefetch & PREFETCH_CTRL_OFFSET_MASK));
}

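/*
 * Program tag or data RAM latencies.  'which_reg' selects PL310_TAG_RAM_CTRL
 * or PL310_DATA_RAM_CTRL; read/write/setup latency values are 1..8 (written
 * to the register field as value - 1), and a value of 0 leaves that field
 * unchanged.
 */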
void
pl310_set_ram_latency(struct pl310_softc *sc, uint32_t which_reg,
   uint32_t read, uint32_t write, uint32_t setup)
{
	uint32_t v;

	KASSERT(which_reg == PL310_TAG_RAM_CTRL ||
	    which_reg == PL310_DATA_RAM_CTRL,
	    ("bad pl310 ram latency register address"));

	v = pl310_read4(sc, which_reg);
	if (setup != 0) {
		KASSERT(setup <= 8, ("bad pl310 setup latency: %d", setup));
		v &= ~RAM_CTRL_SETUP_MASK;
		v |= (setup - 1) << RAM_CTRL_SETUP_SHIFT;
	}
	if (read != 0) {
		KASSERT(read <= 8, ("bad pl310 read latency: %d", read));
		v &= ~RAM_CTRL_READ_MASK;
		v |= (read - 1) << RAM_CTRL_READ_SHIFT;
	}
	if (write != 0) {
		KASSERT(write <= 8, ("bad pl310 write latency: %d", write));
		v &= ~RAM_CTRL_WRITE_MASK;
		v |= (write - 1) << RAM_CTRL_WRITE_SHIFT;
	}
	pl310_write4(sc, which_reg, v);
}

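/*
 * Interrupt filter, only set up when the controller is kept disabled for
 * debugging: if an event-counter interrupt arrives while the cache is
 * supposed to be off, panic loudly.
 */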
static int
pl310_filter(void *arg)
{
	struct pl310_softc *sc = arg;
	uint32_t intr;

	intr = pl310_read4(sc, PL310_INTR_MASK);

	if (!sc->sc_enabled && (intr & INTR_MASK_ECNTR)) {
		/*
		 * This is for debug purposes, so be blunt about it.
		 * We disable the PL310 only when something fishy is going
		 * on and we need to be sure the L2 cache is 100% disabled.
		 */
		panic("pl310: caches disabled but cache event detected\n");
	}

	return (FILTER_HANDLED);
}

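/*
 * Spin until the background operation started via register 'off' has
 * completed, i.e. until the bits in 'mask' read back as zero.
 */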
static __inline void
pl310_wait_background_op(uint32_t off, uint32_t mask)
{

	while (pl310_read4(pl310_softc, off) & mask)
		continue;
}

/**
 *	pl310_cache_sync - performs a cache sync operation
 *
 *	According to the TRM:
 *
 *  "Before writing to any other register you must perform an explicit
 *   Cache Sync operation. This is particularly important when the cache is
 *   enabled and changes to how the cache allocates new lines are to be made."
 *
 */
static __inline void
pl310_cache_sync(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	/* Do not sync outer cache on IO coherent platform */
	if (pl310_softc->sc_io_coherent)
		return;

#ifdef PL310_ERRATA_753970
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		/* Write uncached PL310 register */
		pl310_write4(pl310_softc, 0x740, 0xffffffff);
	else
#endif
		pl310_write4(pl310_softc, PL310_CACHE_SYNC, 0xffffffff);
}

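/*
 * Write back and invalidate the whole L2 cache.  Errata 727915 workarounds:
 * on r2p0 the clean+invalidate is done line by line by way/index, on r3p0
 * the by-way operation is bracketed by debug-register writes.
 */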
static void
pl310_wbinv_all(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r2p0) {
		int i, j;

		for (i = 0; i < g_ways_assoc; i++) {
			for (j = 0; j < g_way_size / g_l2cache_line_size; j++) {
				pl310_write4(pl310_softc,
				    PL310_CLEAN_INV_LINE_IDX,
				    (i << 28 | j << 5));
			}
		}
		pl310_cache_sync();
		PL310_UNLOCK(pl310_softc);
		return;
	}
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	pl310_write4(pl310_softc, PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_wait_background_op(PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_cache_sync();
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 0);
#endif
	PL310_UNLOCK(pl310_softc);
}

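/*
 * Write back and invalidate the L2 cache lines covering the physical range
 * [start, start + size), extending it to cache-line boundaries first.
 * Errata 588369 (through r1p0) and 727915 (r2p0 up to r3p0) workarounds are
 * applied while looping over the lines.
 */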
static void
pl310_wbinv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}

#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision >= CACHE_ID_RELEASE_r2p0 &&
	    pl310_softc->sc_rtl_revision < CACHE_ID_RELEASE_r3p1)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	while (size > 0) {
#ifdef PL310_ERRATA_588369
		if (pl310_softc->sc_rtl_revision <= CACHE_ID_RELEASE_r1p0) {
			/*
			 * Errata 588369 says that clean + inv may keep the
			 * cache line if it was clean; the recommended
			 * workaround is to clean then invalidate the cache
			 * line, with write-back and cache linefill disabled.
			 */
			pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
			pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		} else
#endif
			pl310_write4(pl310_softc, PL310_CLEAN_INV_LINE_PA,
			    start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision >= CACHE_ID_RELEASE_r2p0 &&
	    pl310_softc->sc_rtl_revision < CACHE_ID_RELEASE_r3p1)
		platform_pl310_write_debug(pl310_softc, 0);
#endif

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

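/*
 * Write back (clean) the L2 cache lines covering the physical range
 * [start, start + size), extending it to cache-line boundaries first.
 */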
static void
pl310_wb_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}

	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}

	while (size > 0) {
		pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

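/*
 * Invalidate the L2 cache lines covering the physical range
 * [start, start + size), extending it to cache-line boundaries first.
 */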
static void
pl310_inv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}
	while (size > 0) {
		pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

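/*
 * Drain the PL310 write buffers by issuing a cache sync operation.
 */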
static void
pl310_drain_writebuf(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

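/*
 * Compute the way size, associativity, way mask and total cache size from
 * the Auxiliary Control register.
 */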
static void
pl310_set_way_sizes(struct pl310_softc *sc)
{
	uint32_t aux_value;

	aux_value = pl310_read4(sc, PL310_AUX_CTRL);
	g_way_size = (aux_value & AUX_CTRL_WAY_SIZE_MASK) >>
	    AUX_CTRL_WAY_SIZE_SHIFT;
	g_way_size = 1 << (g_way_size + 13);
	if (aux_value & (1 << AUX_CTRL_ASSOCIATIVITY_SHIFT))
		g_ways_assoc = 16;
	else
		g_ways_assoc = 8;
	g_l2cache_way_mask = (1 << g_ways_assoc) - 1;
	g_l2cache_size = g_way_size * g_ways_assoc;
}

/*
 * Set up interrupt handling.  This is done only if the cache controller is
 * disabled, for debugging.  We set up event counters so that when a cache
 * event happens we'll get interrupted and be warned that something is wrong,
 * because no cache events should happen while the controller is disabled.
 */
static void
pl310_config_intr(void *arg)
{
	struct pl310_softc *sc;

	sc = arg;

	/* activate the interrupt */
	bus_setup_intr(sc->sc_dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    pl310_filter, NULL, sc, &sc->sc_irq_h);

	/* Cache Line Eviction for Counter 0 */
	pl310_write4(sc, PL310_EVENT_COUNTER0_CONF,
	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_CO);
	/* Data Read Request for Counter 1 */
	pl310_write4(sc, PL310_EVENT_COUNTER1_CONF,
	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_DRREQ);

	/* Enable and clear pending interrupts */
	pl310_write4(sc, PL310_INTR_CLEAR, INTR_MASK_ECNTR);
	pl310_write4(sc, PL310_INTR_MASK, INTR_MASK_ALL);

	/* Enable counters and reset C0 and C1 */
	pl310_write4(sc, PL310_EVENT_COUNTER_CTRL,
	    EVENT_COUNTER_CTRL_ENABLED |
	    EVENT_COUNTER_CTRL_C0_RESET |
	    EVENT_COUNTER_CTRL_C1_RESET);

	config_intrhook_disestablish(sc->sc_ich);
	free(sc->sc_ich, M_DEVBUF);
	sc->sc_ich = NULL;
}

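/*
 * Probe: match the FDT compatible strings listed in compat_data and make
 * sure the node's status is "okay".
 */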
static int
pl310_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);
	device_set_desc(dev, "PL310 L2 cache controller");
	return (0);
}

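/*
 * Attach: map the controller registers, identify the RTL revision, honour
 * "arm,io-coherent", de-activate the cache if the boot code left it enabled,
 * then either enable it (invalidate by way after platform init) or leave it
 * disabled and arm the debug event counters; finally hook the L2 maintenance
 * routines into cpufuncs.
 */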
static int
pl310_attach(device_t dev)
{
	struct pl310_softc *sc = device_get_softc(dev);
	int rid;
	uint32_t cache_id, debug_ctrl;
	phandle_t node;

	sc->sc_dev = dev;
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Allocate an IRQ resource */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	                                        RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "cannot allocate IRQ, not using interrupt\n");
	}

	pl310_softc = sc;
	mtx_init(&sc->sc_mtx, "pl310lock", NULL, MTX_SPIN);

	cache_id = pl310_read4(sc, PL310_CACHE_ID);
	sc->sc_rtl_revision = (cache_id >> CACHE_ID_RELEASE_SHIFT) &
	    CACHE_ID_RELEASE_MASK;
	device_printf(dev, "Part number: 0x%x, release: 0x%x\n",
	    (cache_id >> CACHE_ID_PARTNUM_SHIFT) & CACHE_ID_PARTNUM_MASK,
	    (cache_id >> CACHE_ID_RELEASE_SHIFT) & CACHE_ID_RELEASE_MASK);

	/*
	 * Test for the "arm,io-coherent" property and disable outer sync
	 * operations if the platform is I/O coherent.  Outer sync operations
	 * are not needed on a coherent platform and may be harmful in certain
	 * situations.
	 */
	node = ofw_bus_get_node(dev);
	if (OF_hasprop(node, "arm,io-coherent"))
		sc->sc_io_coherent = true;

	/*
	 * If the L2 cache is already enabled then something has violated the
	 * rules, because caches are supposed to be off at kernel entry.  The
	 * cache must be disabled to write the configuration registers without
	 * triggering an access error (SLVERR), but there's no documented safe
	 * procedure for disabling the L2 cache in the manual.  So we'll try to
	 * invent one:
	 *  - Use the debug register to force write-through mode and prevent
	 *    linefills (allocation of new lines on read); now anything we do
	 *    will not cause new data to come into the L2 cache.
	 *  - Writeback and invalidate the current contents.
	 *  - Disable the controller.
	 *  - Restore the original debug settings.
	 */
	if (pl310_read4(sc, PL310_CTRL) & CTRL_ENABLED) {
		device_printf(dev, "Warning: L2 Cache should not already be "
		    "active; trying to de-activate and re-initialize...\n");
		sc->sc_enabled = 1;
		debug_ctrl = pl310_read4(sc, PL310_DEBUG_CTRL);
		platform_pl310_write_debug(sc, debug_ctrl |
		    DEBUG_CTRL_DISABLE_WRITEBACK | DEBUG_CTRL_DISABLE_LINEFILL);
		pl310_set_way_sizes(sc);
		pl310_wbinv_all();
		platform_pl310_write_ctrl(sc, CTRL_DISABLED);
		platform_pl310_write_debug(sc, debug_ctrl);
	}
	sc->sc_enabled = pl310_enabled;

	if (sc->sc_enabled) {
		platform_pl310_init(sc);
		pl310_set_way_sizes(sc); /* platform init might change these */
		pl310_write4(pl310_softc, PL310_INV_WAY, 0xffff);
		pl310_wait_background_op(PL310_INV_WAY, 0xffff);
		platform_pl310_write_ctrl(sc, CTRL_ENABLED);
		device_printf(dev, "L2 Cache enabled: %uKB/%dB %d ways\n",
		    (g_l2cache_size / 1024), g_l2cache_line_size, g_ways_assoc);
		if (bootverbose)
			pl310_print_config(sc);
	} else {
		if (sc->sc_irq_res != NULL) {
			sc->sc_ich = malloc(sizeof(*sc->sc_ich), M_DEVBUF, M_WAITOK);
			sc->sc_ich->ich_func = pl310_config_intr;
			sc->sc_ich->ich_arg = sc;
			if (config_intrhook_establish(sc->sc_ich) != 0) {
				device_printf(dev,
				    "config_intrhook_establish failed\n");
				free(sc->sc_ich, M_DEVBUF);
				return (ENXIO);
			}
		}

		device_printf(dev, "L2 Cache disabled\n");
	}

	/* Set the l2 functions in the set of cpufuncs */
	cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
	cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
	cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
	cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
	cpufuncs.cf_l2cache_drain_writebuf = pl310_drain_writebuf;

	return (0);
}

static device_method_t pl310_methods[] = {
	DEVMETHOD(device_probe, pl310_probe),
	DEVMETHOD(device_attach, pl310_attach),
	DEVMETHOD_END
};

static driver_t pl310_driver = {
	"l2cache",
	pl310_methods,
	sizeof(struct pl310_softc),
};

EARLY_DRIVER_MODULE(pl310, simplebus, pl310_driver, 0, 0,
    BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);
592