xref: /freebsd/sys/arm/arm/pl310.c (revision f56f82e0)
1 /*-
2  * Copyright (c) 2012 Olivier Houchard <cognet@FreeBSD.org>
3  * Copyright (c) 2011
4  *	Ben Gray <ben.r.gray@gmail.com>.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. The name of the company nor the name of the author may be used to
16  *    endorse or promote products derived from this software without specific
17  *    prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY BEN GRAY ``AS IS'' AND ANY EXPRESS OR
20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22  * IN NO EVENT SHALL BEN GRAY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
25  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
26  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
27  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
28  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/bus.h>
36 #include <sys/kernel.h>
37 #include <sys/rman.h>
38 #include <sys/module.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <machine/intr.h>
42 
43 #include <machine/bus.h>
44 #include <machine/pl310.h>
45 
46 #include <dev/ofw/openfirm.h>
47 #include <dev/ofw/ofw_bus.h>
48 #include <dev/ofw/ofw_bus_subr.h>
49 
/*
 * Define this if you need to disable PL310 for debugging purpose
 * Spec:
 * http://infocenter.arm.com/help/topic/com.arm.doc.ddi0246e/DDI0246E_l2c310_r3p1_trm.pdf
 */

/*
 * Hardcode errata for now
 * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246b/pr01s02s02.html
 */
#define	PL310_ERRATA_588369
#define	PL310_ERRATA_753970
#define	PL310_ERRATA_727915

/*
 * The controller registers are shared by all CPUs and are touched from
 * contexts that cannot sleep, hence a spin mutex.  Note: no semicolon after
 * while (0) — the caller supplies it, so the macros expand safely inside
 * if/else statements.
 */
#define	PL310_LOCK(sc) do {		\
	mtx_lock_spin(&(sc)->sc_mtx);	\
} while (0)

#define	PL310_UNLOCK(sc) do {		\
	mtx_unlock_spin(&(sc)->sc_mtx);	\
} while (0)
71 
/* Loader tunable: set hw.pl310.enabled=0 to keep the L2 cache disabled. */
static int pl310_enabled = 1;
TUNABLE_INT("hw.pl310.enabled", &pl310_enabled);

/* One bit set per implemented way; filled in by pl310_set_way_sizes(). */
static uint32_t g_l2cache_way_mask;

/* PL310 cache lines are fixed at 32 bytes. */
static const uint32_t g_l2cache_line_size = 32;
static const uint32_t g_l2cache_align_mask = (32 - 1);

static uint32_t g_l2cache_size;	/* total cache size in bytes */
static uint32_t g_way_size;	/* bytes per way */
static uint32_t g_ways_assoc;	/* associativity (8 or 16 ways) */

/* Singleton softc; the cpufuncs cache hooks take no device argument. */
static struct pl310_softc *pl310_softc;

/* FDT compatible strings accepted by this driver. */
static struct ofw_compat_data compat_data[] = {
	{"arm,pl310",		true}, /* Non-standard, FreeBSD. */
	{"arm,pl310-cache",	true},
	{NULL,			false}
};
91 
92 static void
93 pl310_print_config(struct pl310_softc *sc)
94 {
95 	uint32_t aux, prefetch;
96 	const char *dis = "disabled";
97 	const char *ena = "enabled";
98 
99 	aux = pl310_read4(sc, PL310_AUX_CTRL);
100 	prefetch = pl310_read4(sc, PL310_PREFETCH_CTRL);
101 
102 	device_printf(sc->sc_dev, "Early BRESP response: %s\n",
103 		(aux & AUX_CTRL_EARLY_BRESP) ? ena : dis);
104 	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
105 		(aux & AUX_CTRL_INSTR_PREFETCH) ? ena : dis);
106 	device_printf(sc->sc_dev, "Data prefetch: %s\n",
107 		(aux & AUX_CTRL_DATA_PREFETCH) ? ena : dis);
108 	device_printf(sc->sc_dev, "Non-secure interrupt control: %s\n",
109 		(aux & AUX_CTRL_NS_INT_CTRL) ? ena : dis);
110 	device_printf(sc->sc_dev, "Non-secure lockdown: %s\n",
111 		(aux & AUX_CTRL_NS_LOCKDOWN) ? ena : dis);
112 	device_printf(sc->sc_dev, "Share override: %s\n",
113 		(aux & AUX_CTRL_SHARE_OVERRIDE) ? ena : dis);
114 
115 	device_printf(sc->sc_dev, "Double linefill: %s\n",
116 		(prefetch & PREFETCH_CTRL_DL) ? ena : dis);
117 	device_printf(sc->sc_dev, "Instruction prefetch: %s\n",
118 		(prefetch & PREFETCH_CTRL_INSTR_PREFETCH) ? ena : dis);
119 	device_printf(sc->sc_dev, "Data prefetch: %s\n",
120 		(prefetch & PREFETCH_CTRL_DATA_PREFETCH) ? ena : dis);
121 	device_printf(sc->sc_dev, "Double linefill on WRAP request: %s\n",
122 		(prefetch & PREFETCH_CTRL_DL_ON_WRAP) ? ena : dis);
123 	device_printf(sc->sc_dev, "Prefetch drop: %s\n",
124 		(prefetch & PREFETCH_CTRL_PREFETCH_DROP) ? ena : dis);
125 	device_printf(sc->sc_dev, "Incr double Linefill: %s\n",
126 		(prefetch & PREFETCH_CTRL_INCR_DL) ? ena : dis);
127 	device_printf(sc->sc_dev, "Not same ID on exclusive sequence: %s\n",
128 		(prefetch & PREFETCH_CTRL_NOTSAMEID) ? ena : dis);
129 	device_printf(sc->sc_dev, "Prefetch offset: %d\n",
130 		(prefetch & PREFETCH_CTRL_OFFSET_MASK));
131 }
132 
133 void
134 pl310_set_ram_latency(struct pl310_softc *sc, uint32_t which_reg,
135    uint32_t read, uint32_t write, uint32_t setup)
136 {
137 	uint32_t v;
138 
139 	KASSERT(which_reg == PL310_TAG_RAM_CTRL ||
140 	    which_reg == PL310_DATA_RAM_CTRL,
141 	    ("bad pl310 ram latency register address"));
142 
143 	v = pl310_read4(sc, which_reg);
144 	if (setup != 0) {
145 		KASSERT(setup <= 8, ("bad pl310 setup latency: %d", setup));
146 		v &= ~RAM_CTRL_SETUP_MASK;
147 		v |= (setup - 1) << RAM_CTRL_SETUP_SHIFT;
148 	}
149 	if (read != 0) {
150 		KASSERT(read <= 8, ("bad pl310 read latency: %d", read));
151 		v &= ~RAM_CTRL_READ_MASK;
152 		v |= (read - 1) << RAM_CTRL_READ_SHIFT;
153 	}
154 	if (write != 0) {
155 		KASSERT(write <= 8, ("bad pl310 write latency: %d", write));
156 		v &= ~RAM_CTRL_WRITE_MASK;
157 		v |= (write - 1) << RAM_CTRL_WRITE_SHIFT;
158 	}
159 	pl310_write4(sc, which_reg, v);
160 }
161 
162 static int
163 pl310_filter(void *arg)
164 {
165 	struct pl310_softc *sc = arg;
166 	uint32_t intr;
167 
168 	intr = pl310_read4(sc, PL310_INTR_MASK);
169 
170 	if (!sc->sc_enabled && (intr & INTR_MASK_ECNTR)) {
171 		/*
172 		 * This is for debug purpose, so be blunt about it
173 		 * We disable PL310 only when something fishy is going
174 		 * on and we need to make sure L2 cache is 100% disabled
175 		 */
176 		panic("pl310: caches disabled but cache event detected\n");
177 	}
178 
179 	return (FILTER_HANDLED);
180 }
181 
182 static __inline void
183 pl310_wait_background_op(uint32_t off, uint32_t mask)
184 {
185 
186 	while (pl310_read4(pl310_softc, off) & mask)
187 		continue;
188 }
189 
190 
191 /**
192  *	pl310_cache_sync - performs a cache sync operation
193  *
194  *	According to the TRM:
195  *
196  *  "Before writing to any other register you must perform an explicit
197  *   Cache Sync operation. This is particularly important when the cache is
198  *   enabled and changes to how the cache allocates new lines are to be made."
199  *
200  *
201  */
202 static __inline void
203 pl310_cache_sync(void)
204 {
205 
206 	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
207 		return;
208 
209 #ifdef PL310_ERRATA_753970
210 	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
211 		/* Write uncached PL310 register */
212 		pl310_write4(pl310_softc, 0x740, 0xffffffff);
213 	else
214 #endif
215 		pl310_write4(pl310_softc, PL310_CACHE_SYNC, 0xffffffff);
216 }
217 
218 
/*
 * Write back and invalidate the entire L2 cache.  The exact sequence is
 * revision-dependent because of hardware errata; statement order matters.
 */
static void
pl310_wbinv_all(void)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
#ifdef PL310_ERRATA_727915
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r2p0) {
		/*
		 * Errata 727915 workaround for r2p0: avoid the background
		 * clean+invalidate-by-way operation entirely and instead
		 * clean+invalidate each line of each way by index.
		 */
		int i, j;

		for (i = 0; i < g_ways_assoc; i++) {
			for (j = 0; j < g_way_size / g_l2cache_line_size; j++) {
				/* way in the high bits, line index from bit 5
				 * (32-byte lines). */
				pl310_write4(pl310_softc,
				    PL310_CLEAN_INV_LINE_IDX,
				    (i << 28 | j << 5));
			}
		}
		pl310_cache_sync();
		PL310_UNLOCK(pl310_softc);
		return;

	}
	/* On r3p0 the same errata is handled via the debug register. */
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	/* Background clean+invalidate of all ways, then wait for completion. */
	pl310_write4(pl310_softc, PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_wait_background_op(PL310_CLEAN_INV_WAY, g_l2cache_way_mask);
	pl310_cache_sync();
#ifdef PL310_ERRATA_727915
	/* Restore normal (non-debug) operation on r3p0. */
	if (pl310_softc->sc_rtl_revision == CACHE_ID_RELEASE_r3p0)
		platform_pl310_write_debug(pl310_softc, 0);
#endif
	PL310_UNLOCK(pl310_softc);
}
255 
/*
 * Write back and invalidate the L2 cache lines covering the physical range
 * [start, start + size).  The range is first widened to whole cache lines.
 * Errata workarounds make the per-line operation revision-dependent.
 */
static void
pl310_wbinv_range(vm_paddr_t start, vm_size_t size)
{

	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
		return;

	PL310_LOCK(pl310_softc);
	/* Round 'start' down to a line boundary, extending 'size' to match. */
	if (start & g_l2cache_align_mask) {
		size += start & g_l2cache_align_mask;
		start &= ~g_l2cache_align_mask;
	}
	/* Round 'size' up to a whole number of cache lines. */
	if (size & g_l2cache_align_mask) {
		size &= ~g_l2cache_align_mask;
	   	size += g_l2cache_line_size;
	}


#ifdef PL310_ERRATA_727915
	/* r2p0..r3p0: run the loop with the debug register overrides set. */
	if (pl310_softc->sc_rtl_revision >= CACHE_ID_RELEASE_r2p0 &&
	    pl310_softc->sc_rtl_revision < CACHE_ID_RELEASE_r3p1)
		platform_pl310_write_debug(pl310_softc, 3);
#endif
	while (size > 0) {
#ifdef PL310_ERRATA_588369
		if (pl310_softc->sc_rtl_revision <= CACHE_ID_RELEASE_r1p0) {
			/*
			 * Errata 588369 says that clean + inv may keep the
			 * cache line if it was clean, the recommanded
			 * workaround is to clean then invalidate the cache
			 * line, with write-back and cache linefill disabled.
			 */
			pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
			pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
		} else
#endif
			pl310_write4(pl310_softc, PL310_CLEAN_INV_LINE_PA,
			    start);
		start += g_l2cache_line_size;
		size -= g_l2cache_line_size;
	}
#ifdef PL310_ERRATA_727915
	/* Restore normal operation. */
	if (pl310_softc->sc_rtl_revision >= CACHE_ID_RELEASE_r2p0 &&
	    pl310_softc->sc_rtl_revision < CACHE_ID_RELEASE_r3p1)
		platform_pl310_write_debug(pl310_softc, 0);
#endif

	pl310_cache_sync();
	PL310_UNLOCK(pl310_softc);
}
306 
307 static void
308 pl310_wb_range(vm_paddr_t start, vm_size_t size)
309 {
310 
311 	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
312 		return;
313 
314 	PL310_LOCK(pl310_softc);
315 	if (start & g_l2cache_align_mask) {
316 		size += start & g_l2cache_align_mask;
317 		start &= ~g_l2cache_align_mask;
318 	}
319 
320 	if (size & g_l2cache_align_mask) {
321 		size &= ~g_l2cache_align_mask;
322 		size += g_l2cache_line_size;
323 	}
324 
325 	while (size > 0) {
326 		pl310_write4(pl310_softc, PL310_CLEAN_LINE_PA, start);
327 		start += g_l2cache_line_size;
328 		size -= g_l2cache_line_size;
329 	}
330 
331 	pl310_cache_sync();
332 	PL310_UNLOCK(pl310_softc);
333 }
334 
335 static void
336 pl310_inv_range(vm_paddr_t start, vm_size_t size)
337 {
338 
339 	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
340 		return;
341 
342 	PL310_LOCK(pl310_softc);
343 	if (start & g_l2cache_align_mask) {
344 		size += start & g_l2cache_align_mask;
345 		start &= ~g_l2cache_align_mask;
346 	}
347 	if (size & g_l2cache_align_mask) {
348 		size &= ~g_l2cache_align_mask;
349 		size += g_l2cache_line_size;
350 	}
351 	while (size > 0) {
352 		pl310_write4(pl310_softc, PL310_INV_LINE_PA, start);
353 		start += g_l2cache_line_size;
354 		size -= g_l2cache_line_size;
355 	}
356 
357 	pl310_cache_sync();
358 	PL310_UNLOCK(pl310_softc);
359 }
360 
361 static void
362 pl310_drain_writebuf(void)
363 {
364 
365 	if ((pl310_softc == NULL) || !pl310_softc->sc_enabled)
366 		return;
367 
368 	PL310_LOCK(pl310_softc);
369 	pl310_cache_sync();
370 	PL310_UNLOCK(pl310_softc);
371 }
372 
373 static void
374 pl310_set_way_sizes(struct pl310_softc *sc)
375 {
376 	uint32_t aux_value;
377 
378 	aux_value = pl310_read4(sc, PL310_AUX_CTRL);
379 	g_way_size = (aux_value & AUX_CTRL_WAY_SIZE_MASK) >>
380 	    AUX_CTRL_WAY_SIZE_SHIFT;
381 	g_way_size = 1 << (g_way_size + 13);
382 	if (aux_value & (1 << AUX_CTRL_ASSOCIATIVITY_SHIFT))
383 		g_ways_assoc = 16;
384 	else
385 		g_ways_assoc = 8;
386 	g_l2cache_way_mask = (1 << g_ways_assoc) - 1;
387 	g_l2cache_size = g_way_size * g_ways_assoc;
388 }
389 
390 /*
391  * Setup interrupt handling.  This is done only if the cache controller is
392  * disabled, for debugging.  We set counters so when a cache event happens we'll
393  * get interrupted and be warned that something is wrong, because no cache
394  * events should happen if we're disabled.
395  */
396 static void
397 pl310_config_intr(void *arg)
398 {
399 	struct pl310_softc * sc;
400 
401 	sc = arg;
402 
403 	/* activate the interrupt */
404 	bus_setup_intr(sc->sc_dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
405 	    pl310_filter, NULL, sc, &sc->sc_irq_h);
406 
407 	/* Cache Line Eviction for Counter 0 */
408 	pl310_write4(sc, PL310_EVENT_COUNTER0_CONF,
409 	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_CO);
410 	/* Data Read Request for Counter 1 */
411 	pl310_write4(sc, PL310_EVENT_COUNTER1_CONF,
412 	    EVENT_COUNTER_CONF_INCR | EVENT_COUNTER_CONF_DRREQ);
413 
414 	/* Enable and clear pending interrupts */
415 	pl310_write4(sc, PL310_INTR_CLEAR, INTR_MASK_ECNTR);
416 	pl310_write4(sc, PL310_INTR_MASK, INTR_MASK_ALL);
417 
418 	/* Enable counters and reset C0 and C1 */
419 	pl310_write4(sc, PL310_EVENT_COUNTER_CTRL,
420 	    EVENT_COUNTER_CTRL_ENABLED |
421 	    EVENT_COUNTER_CTRL_C0_RESET |
422 	    EVENT_COUNTER_CTRL_C1_RESET);
423 
424 	config_intrhook_disestablish(sc->sc_ich);
425 	free(sc->sc_ich, M_DEVBUF);
426 	sc->sc_ich = NULL;
427 }
428 
429 static int
430 pl310_probe(device_t dev)
431 {
432 
433 	if (!ofw_bus_status_okay(dev))
434 		return (ENXIO);
435 	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
436 		return (ENXIO);
437 	device_set_desc(dev, "PL310 L2 cache controller");
438 	return (0);
439 }
440 
/*
 * Newbus attach: map the controller, identify the RTL revision, make sure
 * the cache starts from a known-disabled state, then either enable it (the
 * normal case) or leave it off and arm the debug event counters.  Finally
 * install the L2 maintenance hooks into cpufuncs.
 */
static int
pl310_attach(device_t dev)
{
	struct pl310_softc *sc = device_get_softc(dev);
	int rid;
	uint32_t cache_id, debug_ctrl;

	sc->sc_dev = dev;
	rid = 0;
	/* Registers are mandatory; without them nothing below can work. */
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Allocate an IRQ resource (optional; only used for debug counters) */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	                                        RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "cannot allocate IRQ, not using interrupt\n");
	}

	/* Publish the singleton before any cache op can be called. */
	pl310_softc = sc;
	mtx_init(&sc->sc_mtx, "pl310lock", NULL, MTX_SPIN);

	/* RTL release is needed to select errata workarounds later on. */
	cache_id = pl310_read4(sc, PL310_CACHE_ID);
	sc->sc_rtl_revision = (cache_id >> CACHE_ID_RELEASE_SHIFT) &
	    CACHE_ID_RELEASE_MASK;
	device_printf(dev, "Part number: 0x%x, release: 0x%x\n",
	    (cache_id >> CACHE_ID_PARTNUM_SHIFT) & CACHE_ID_PARTNUM_MASK,
	    (cache_id >> CACHE_ID_RELEASE_SHIFT) & CACHE_ID_RELEASE_MASK);

	/*
	 * If L2 cache is already enabled then something has violated the rules,
	 * because caches are supposed to be off at kernel entry.  The cache
	 * must be disabled to write the configuration registers without
	 * triggering an access error (SLVERR), but there's no documented safe
	 * procedure for disabling the L2 cache in the manual.  So we'll try to
	 * invent one:
	 *  - Use the debug register to force write-through mode and prevent
	 *    linefills (allocation of new lines on read); now anything we do
	 *    will not cause new data to come into the L2 cache.
	 *  - Writeback and invalidate the current contents.
	 *  - Disable the controller.
	 *  - Restore the original debug settings.
	 */
	if (pl310_read4(sc, PL310_CTRL) & CTRL_ENABLED) {
		device_printf(dev, "Warning: L2 Cache should not already be "
		    "active; trying to de-activate and re-initialize...\n");
		/* sc_enabled must be set or pl310_wbinv_all() would bail. */
		sc->sc_enabled = 1;
		debug_ctrl = pl310_read4(sc, PL310_DEBUG_CTRL);
		platform_pl310_write_debug(sc, debug_ctrl |
		    DEBUG_CTRL_DISABLE_WRITEBACK | DEBUG_CTRL_DISABLE_LINEFILL);
		/* Way geometry is needed by the wbinv_all errata loop. */
		pl310_set_way_sizes(sc);
		pl310_wbinv_all();
		platform_pl310_write_ctrl(sc, CTRL_DISABLED);
		platform_pl310_write_debug(sc, debug_ctrl);
	}
	/* Honour the hw.pl310.enabled tunable from here on. */
	sc->sc_enabled = pl310_enabled;

	if (sc->sc_enabled) {
		platform_pl310_init(sc);
		pl310_set_way_sizes(sc); /* platform init might change these */
		/* Invalidate all ways before turning the controller on. */
		pl310_write4(pl310_softc, PL310_INV_WAY, 0xffff);
		pl310_wait_background_op(PL310_INV_WAY, 0xffff);
		platform_pl310_write_ctrl(sc, CTRL_ENABLED);
		device_printf(dev, "L2 Cache enabled: %uKB/%dB %d ways\n",
		    (g_l2cache_size / 1024), g_l2cache_line_size, g_ways_assoc);
		if (bootverbose)
			pl310_print_config(sc);
	} else {
		/*
		 * Debug mode: defer counter/interrupt setup to a config
		 * intrhook so it runs once interrupts are available.
		 */
		if (sc->sc_irq_res != NULL) {
			sc->sc_ich = malloc(sizeof(*sc->sc_ich), M_DEVBUF, M_WAITOK);
			sc->sc_ich->ich_func = pl310_config_intr;
			sc->sc_ich->ich_arg = sc;
			if (config_intrhook_establish(sc->sc_ich) != 0) {
				device_printf(dev,
				    "config_intrhook_establish failed\n");
				free(sc->sc_ich, M_DEVBUF);
				return(ENXIO);
			}
		}

		device_printf(dev, "L2 Cache disabled\n");
	}

	/* Set the l2 functions in the set of cpufuncs */
	cpufuncs.cf_l2cache_wbinv_all = pl310_wbinv_all;
	cpufuncs.cf_l2cache_wbinv_range = pl310_wbinv_range;
	cpufuncs.cf_l2cache_inv_range = pl310_inv_range;
	cpufuncs.cf_l2cache_wb_range = pl310_wb_range;
	cpufuncs.cf_l2cache_drain_writebuf = pl310_drain_writebuf;

	return (0);
}
536 
/* Newbus glue: probe/attach only — the L2 controller is never detached. */
static device_method_t pl310_methods[] = {
	DEVMETHOD(device_probe, pl310_probe),
	DEVMETHOD(device_attach, pl310_attach),
	DEVMETHOD_END
};

static driver_t pl310_driver = {
        "l2cache",
        pl310_methods,
        sizeof(struct pl310_softc),
};
static devclass_t pl310_devclass;

/*
 * Attach during the CPU bus pass so the L2 cache is configured before
 * drivers that depend on coherent DMA come up.
 */
EARLY_DRIVER_MODULE(pl310, simplebus, pl310_driver, pl310_devclass, 0, 0,
    BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE);
552 
553