xref: /freebsd/sys/arm/arm/pl310.c (revision c697fb7f)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2012 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright (c) 2011
 *	Ben Gray <ben.r.gray@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/intr.h>

#include <machine/bus.h>
#include <machine/pl310.h>
#ifdef PLATFORM
#include <machine/platformvar.h>
#endif

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#ifdef PLATFORM
#include "platform_pl310_if.h"
#endif

/*
 * The PL310 can be disabled for debugging purposes via the hw.pl310.enabled
 * tunable below.
 * Spec:
 * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0246e/DDI0246E_l2c310_r3p1_trm.pdf
 */

/*
 * Hardcode errata for now
 * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246b/pr01s02s02.html
 */
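/*
 * 588369 (r0p0-r1p0): Clean & Invalidate by PA may leave a clean line in
 *     place; worked around by cleaning, then invalidating, each line.
 * 727915 (r2p0-r3p0): background Clean & Invalidate by Way is unreliable;
 *     worked around by per-index operations on r2p0 and by disabling
 *     write-back and linefills through the debug register on r3p0.
 * 753970 (r3p0): the Cache Sync operation is affected; the sync is issued
 *     through the dummy register at offset 0x740 instead (see
 *     pl310_cache_sync()).
 */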
#define	PL310_ERRATA_588369
#define	PL310_ERRATA_753970
#define	PL310_ERRATA_727915

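/*
 * A spin mutex serializes the line-by-line maintenance loops and the
 * trailing cache sync so that operations issued from different CPUs do not
 * interleave.
 */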
#define	PL310_LOCK(sc) do {		\
	mtx_lock_spin(&(sc)->sc_mtx);	\
} while (0)

#define	PL310_UNLOCK(sc) do {		\
	mtx_unlock_spin(&(sc)->sc_mtx);	\
} while (0)

static int pl310_enabled = 1;
TUNABLE_INT("hw.pl310.enabled", &pl310_enabled);

static uint32_t g_l2cache_way_mask;

static const uint32_t g_l2cache_line_size = 32;
static const uint32_t g_l2cache_align_mask = (32 - 1);

static uint32_t g_l2cache_size;
static uint32_t g_way_size;
static uint32_t g_ways_assoc;

static struct pl310_softc *pl310_softc;

static struct ofw_compat_data compat_data[] = {
	{"arm,pl310",		true}, /* Non-standard, FreeBSD. */
	{"arm,pl310-cache",	true},
	{NULL,			false}
};

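/*
 * With PLATFORM kernels, control and debug register writes are routed
 * through the platform_pl310 interface; on some SoCs these registers are
 * writable only from secure state, so the platform method may have to go
 * through the secure monitor.
 */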
#ifdef PLATFORM
static void
platform_pl310_init(struct pl310_softc *sc)
{

	PLATFORM_PL310_INIT(platform_obj(), sc);
}

static void
platform_pl310_write_ctrl(struct pl310_softc *sc, uint32_t val)
{

	PLATFORM_PL310_WRITE_CTRL(platform_obj(), sc, val);
}

static void
platform_pl310_write_debug(struct pl310_softc *sc, uint32_t val)
{

	PLATFORM_PL310_WRITE_DEBUG(platform_obj(), sc, val);
}
#endif

static void
pl310_print_config(struct pl310_softc *sc)
{
	uint32_t aux, prefetch;
	const char *dis = "disabled";
	const char *ena = "enabled";

	aux = pl310_read4(sc, PL310_AUX_CTRL);
	prefetch = pl310_read4(sc, PL310_PREFETCH_CTRL);

	device_printf(sc->sc_dev, "Early BRESP response: %s\n",
		(aux & AUX_CTRL_EARLY_BRESP) ? ena : dis);
	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
		(aux & AUX_CTRL_INSTR_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Data prefetch: %s\n",
		(aux & AUX_CTRL_DATA_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Non-secure interrupt control: %s\n",
		(aux & AUX_CTRL_NS_INT_CTRL) ? ena : dis);
	device_printf(sc->sc_dev, "Non-secure lockdown: %s\n",
		(aux & AUX_CTRL_NS_LOCKDOWN) ? ena : dis);
	device_printf(sc->sc_dev, "Share override: %s\n",
		(aux & AUX_CTRL_SHARE_OVERRIDE) ? ena : dis);

	device_printf(sc->sc_dev, "Double linefill: %s\n",
		(prefetch & PREFETCH_CTRL_DL) ? ena : dis);
	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
		(prefetch & PREFETCH_CTRL_INSTR_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Data prefetch: %s\n",
		(prefetch & PREFETCH_CTRL_DATA_PREFETCH) ? ena : dis);
	device_printf(sc->sc_dev, "Double linefill on WRAP request: %s\n",
		(prefetch & PREFETCH_CTRL_DL_ON_WRAP) ? ena : dis);
	device_printf(sc->sc_dev, "Prefetch drop: %s\n",
		(prefetch & PREFETCH_CTRL_PREFETCH_DROP) ? ena : dis);
	device_printf(sc->sc_dev, "Incr double Linefill: %s\n",
		(prefetch & PREFETCH_CTRL_INCR_DL) ? ena : dis);
	device_printf(sc->sc_dev, "Not same ID on exclusive sequence: %s\n",
		(prefetch & PREFETCH_CTRL_NOTSAMEID) ? ena : dis);
	device_printf(sc->sc_dev, "Prefetch offset: %d\n",
		(prefetch & PREFETCH_CTRL_OFFSET_MASK));
}

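/*
 * Program tag or data RAM latencies.  The read, write, and setup arguments
 * are cycle counts in the range 1-8 and are written to the register as
 * value - 1; an argument of 0 leaves that field unchanged.
 */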
void
pl310_set_ram_latency(struct pl310_softc *sc, uint32_t which_reg,
    uint32_t read, uint32_t write, uint32_t setup)
{
	uint32_t v;

	KASSERT(which_reg == PL310_TAG_RAM_CTRL ||
	    which_reg == PL310_DATA_RAM_CTRL,
	    ("bad pl310 ram latency register address"));

	v = pl310_read4(sc, which_reg);
	if (setup != 0) {
		KASSERT(setup <= 8, ("bad pl310 setup latency: %d", setup));
		v &= ~RAM_CTRL_SETUP_MASK;
		v |= (setup - 1) << RAM_CTRL_SETUP_SHIFT;
	}
	if (read != 0) {
		KASSERT(read <= 8, ("bad pl310 read latency: %d", read));
		v &= ~RAM_CTRL_READ_MASK;
		v |= (read - 1) << RAM_CTRL_READ_SHIFT;
	}
	if (write != 0) {
		KASSERT(write <= 8, ("bad pl310 write latency: %d", write));
		v &= ~RAM_CTRL_WRITE_MASK;
		v |= (write - 1) << RAM_CTRL_WRITE_SHIFT;
	}
	pl310_write4(sc, which_reg, v);
}

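/*
 * Interrupt filter, installed only when the controller is left disabled for
 * debugging.  An event counter interrupt then means the L2 saw traffic it
 * should not have, so fail loudly.
 */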
static int
pl310_filter(void *arg)
{
	struct pl310_softc *sc = arg;
	uint32_t intr;

	intr = pl310_read4(sc, PL310_INTR_MASK);

	if (!sc->sc_enabled && (intr & INTR_MASK_ECNTR)) {
		/*
		 * This is for debugging purposes, so be blunt about it:
		 * we disable the PL310 only when something fishy is going
		 * on and we need to make sure the L2 cache stays 100%
		 * disabled.
		 */
		panic("pl310: caches disabled but cache event detected\n");
	}

	return (FILTER_HANDLED);
}

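/*
 * Way-based background operations set one bit per way in the register;
 * spin until the controller has cleared them all.
 */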
static __inline void
pl310_wait_background_op(uint32_t off, uint32_t mask)
{

	while (pl310_read4(pl310_softc, off) & mask)
		continue;
}

/**
 *	pl310_cache_sync - performs a cache sync operation
 *
 *	According to the TRM:
 *
 *  "Before writing to any other register you must perform an explicit
 *   Cache Sync operation. This is particularly important when the cache is
 *   enabled and changes to how the cache allocates new lines are to be made."
 *
 */
static __inline void
pl310_cache_sync(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	/* Do not sync outer cache on IO coherent platform */
	if (pl310_softc->sc_io_coherent)
		return;

#ifdef PL310_ERRATA_753970
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		/* Erratum 753970: sync via the dummy register at 0x740 */
		pl310_write4(pl310_softc, 0x740, 0xffffffff);
	else
#endif
		pl310_write4(pl310_softc, PL310_CACHE_SYNC, 0xffffffff);
}

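/*
 * Write back and invalidate the entire L2 cache, using the by-way operation
 * (or a line-by-line pass by index on r2p0, see erratum 727915 above).
 */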
static void
pl310_wbinv_all(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r2p0) {
		int i, j;

		for (i = 0; i < g_ways_assoc; i++) {
			for (j = 0; j < g_way_size / g_l2cache_line_size; j++) {
				pl310_write4(pl310_softc,
				    PL310_CLEAN_INV_LINE_IDX,
				    (i << 28 | j << 5));
			}
		}
		pl310_cache_sync();
		PL310_UNLOCK(pl310_softc);
		return;
	}
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	pl310_write4(pl310_softc, PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_wait_background_op(PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_cache_sync();
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 0);
#endif
	PL310_UNLOCK(pl310_softc);
}

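/*
 * Write back and invalidate a physical address range.  The range is first
 * rounded out to cache-line boundaries, then each line is maintained by PA.
 */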
static void
pl310_wbinv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}

#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision >= CACHE_ID_RELEASE_r2p0 &&
	    pl310_softc->sc_rtl_revision < CACHE_ID_RELEASE_r3p1)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	while (size > 0) {
#ifdef PL310_ERRATA_588369
		if (pl310_softc->sc_rtl_revision <= CACHE_ID_RELEASE_r1p0) {
			/*
			 * Errata 588369 says that clean + inv may keep the
			 * cache line if it was clean; the recommended
			 * workaround is to clean then invalidate the cache
			 * line, with write-back and cache linefill disabled.
			 */
			pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
			pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		} else
#endif
			pl310_write4(pl310_softc, PL310_CLEAN_INV_LINE_PA,
			    start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision >= CACHE_ID_RELEASE_r2p0 &&
	    pl310_softc->sc_rtl_revision < CACHE_ID_RELEASE_r3p1)
		platform_pl310_write_debug(pl310_softc, 0);
#endif

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

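/*
 * Write back (clean) a physical address range without invalidating it.
 */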
static void
pl310_wb_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}

	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}

	while (size > 0) {
		pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

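/*
 * Invalidate a physical address range without writing it back.  Unaligned
 * boundaries are rounded outward to full cache lines before invalidating.
 */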
static void
pl310_inv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
		size += g_l2cache_line_size;
	}
	while (size > 0) {
		pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

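/*
 * The Cache Sync operation also drains the controller's buffers, so a
 * locked sync is sufficient here.
 */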
static void
pl310_drain_writebuf(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}

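/*
 * Decode the aux control register: a way-size field of n means 8KB << n
 * per way, and the associativity bit selects 16 ways instead of 8.  These
 * values size the by-index loops and the attach-time banner.
 */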
static void
pl310_set_way_sizes(struct pl310_softc *sc)
{
	uint32_t aux_value;

	aux_value = pl310_read4(sc, PL310_AUX_CTRL);
	g_way_size = (aux_value & AUX_CTRL_WAY_SIZE_MASK) >>
	    AUX_CTRL_WAY_SIZE_SHIFT;
	g_way_size = 1 << (g_way_size + 13);
	if (aux_value & (1 << AUX_CTRL_ASSOCIATIVITY_SHIFT))
		g_ways_assoc = 16;
	else
		g_ways_assoc = 8;
	g_l2cache_way_mask = (1 << g_ways_assoc) - 1;
	g_l2cache_size = g_way_size * g_ways_assoc;
}

/*
 * Set up interrupt handling.  This is done only if the cache controller is
 * disabled, for debugging.  We set counters so that when a cache event
 * happens we'll get interrupted and be warned that something is wrong,
 * because no cache events should happen while we're disabled.
 */
static void
pl310_config_intr(void *arg)
{
	struct pl310_softc *sc;

	sc = arg;

	/* activate the interrupt */
	bus_setup_intr(sc->sc_dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    pl310_filter, NULL, sc, &sc->sc_irq_h);

	/* Cache Line Eviction for Counter 0 */
	pl310_write4(sc, PL310_EVENT_COUNTER0_CONF,
	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_CO);
	/* Data Read Request for Counter 1 */
	pl310_write4(sc, PL310_EVENT_COUNTER1_CONF,
	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_DRREQ);

	/* Enable and clear pending interrupts */
	pl310_write4(sc, PL310_INTR_CLEAR, INTR_MASK_ECNTR);
	pl310_write4(sc, PL310_INTR_MASK, INTR_MASK_ALL);

	/* Enable counters and reset C0 and C1 */
	pl310_write4(sc, PL310_EVENT_COUNTER_CTRL,
	    EVENT_COUNTER_CTRL_ENABLED |
	    EVENT_COUNTER_CTRL_C0_RESET |
	    EVENT_COUNTER_CTRL_C1_RESET);

	config_intrhook_disestablish(sc->sc_ich);
	free(sc->sc_ich, M_DEVBUF);
	sc->sc_ich = NULL;
}

static int
pl310_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);
	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
		return (ENXIO);
	device_set_desc(dev, "PL310 L2 cache controller");
	return (0);
}

static int
pl310_attach(device_t dev)
{
	struct pl310_softc *sc = device_get_softc(dev);
	int rid;
	uint32_t cache_id, debug_ctrl;
	phandle_t node;

	sc->sc_dev = dev;
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Allocate an IRQ resource */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "cannot allocate IRQ, not using interrupt\n");
	}

	pl310_softc = sc;
	mtx_init(&sc->sc_mtx, "pl310lock", NULL, MTX_SPIN);

	cache_id = pl310_read4(sc, PL310_CACHE_ID);
	sc->sc_rtl_revision = (cache_id >> CACHE_ID_RELEASE_SHIFT) &
	    CACHE_ID_RELEASE_MASK;
	device_printf(dev, "Part number: 0x%x, release: 0x%x\n",
	    (cache_id >> CACHE_ID_PARTNUM_SHIFT) & CACHE_ID_PARTNUM_MASK,
	    (cache_id >> CACHE_ID_RELEASE_SHIFT) & CACHE_ID_RELEASE_MASK);

	/*
	 * Test for "arm,io-coherent" property and disable sync operation if
	 * platform is I/O coherent. Outer sync operations are not needed
	 * on coherent platform and may be harmful in certain situations.
	 */
	node = ofw_bus_get_node(dev);
	if (OF_hasprop(node, "arm,io-coherent"))
		sc->sc_io_coherent = true;

	/*
	 * If L2 cache is already enabled then something has violated the rules,
	 * because caches are supposed to be off at kernel entry.  The cache
	 * must be disabled to write the configuration registers without
	 * triggering an access error (SLVERR), but there's no documented safe
	 * procedure for disabling the L2 cache in the manual.  So we'll try to
	 * invent one:
	 *  - Use the debug register to force write-through mode and prevent
	 *    linefills (allocation of new lines on read); now anything we do
	 *    will not cause new data to come into the L2 cache.
	 *  - Writeback and invalidate the current contents.
	 *  - Disable the controller.
	 *  - Restore the original debug settings.
	 */
	if (pl310_read4(sc, PL310_CTRL) & CTRL_ENABLED) {
		device_printf(dev, "Warning: L2 Cache should not already be "
		    "active; trying to de-activate and re-initialize...\n");
		sc->sc_enabled = 1;
		debug_ctrl = pl310_read4(sc, PL310_DEBUG_CTRL);
		platform_pl310_write_debug(sc, debug_ctrl |
		    DEBUG_CTRL_DISABLE_WRITEBACK | DEBUG_CTRL_DISABLE_LINEFILL);
		pl310_set_way_sizes(sc);
		pl310_wbinv_all();
		platform_pl310_write_ctrl(sc, CTRL_DISABLED);
		platform_pl310_write_debug(sc, debug_ctrl);
	}
	sc->sc_enabled = pl310_enabled;

	if (sc->sc_enabled) {
		platform_pl310_init(sc);
		pl310_set_way_sizes(sc); /* platform init might change these */
		pl310_write4(pl310_softc, PL310_INV_WAY, 0xffff);
		pl310_wait_background_op(PL310_INV_WAY, 0xffff);
		platform_pl310_write_ctrl(sc, CTRL_ENABLED);
		device_printf(dev, "L2 Cache enabled: %uKB/%dB %d ways\n",
		    (g_l2cache_size / 1024), g_l2cache_line_size, g_ways_assoc);
		if (bootverbose)
			pl310_print_config(sc);
	} else {
		if (sc->sc_irq_res != NULL) {
			sc->sc_ich = malloc(sizeof(*sc->sc_ich), M_DEVBUF, M_WAITOK);
			sc->sc_ich->ich_func = pl310_config_intr;
			sc->sc_ich->ich_arg = sc;
			if (config_intrhook_establish(sc->sc_ich) != 0) {
				device_printf(dev,
				    "config_intrhook_establish failed\n");
				free(sc->sc_ich, M_DEVBUF);
				return (ENXIO);
			}
		}

		device_printf(dev, "L2 Cache disabled\n");
	}

	/* Set the l2 functions in the set of cpufuncs */
	cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
	cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
	cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
	cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
	cpufuncs.cf_l2cache_drain_writebuf = pl310_drain_writebuf;

	return (0);
}

static device_method_t pl310_methods[] = {
	DEVMETHOD(device_probe, pl310_probe),
	DEVMETHOD(device_attach, pl310_attach),
	DEVMETHOD_END
};

static driver_t pl310_driver = {
	"l2cache",
	pl310_methods,
	sizeof(struct pl310_softc),
};
static devclass_t pl310_devclass;

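/*
 * Attach in the early CPU pass so the L2 configuration and the cpufuncs
 * hooks are in place before regular drivers start attaching.
 */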
EARLY_DRIVER_MODULE(pl310, simplebus, pl310_driver, pl310_devclass, 0, 0,
    BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);