/*	$NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 *	for the NetBSD Project.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Machine dependent functions for kernel setup
 *
 * Created      : 17/09/94
 * Updated	: 18/04/01 updated for new wscons
 */

#include "opt_ddb.h"
#include "opt_kstack_pages.h"
#include "opt_platform.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/devmap.h>
#include <sys/efi.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/physmem.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>

#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/asm.h>
#include <machine/debug_monitor.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/pcb.h>
#include <machine/platform.h>
#include <machine/sysarch.h>
#include <machine/undefined.h>
#include <machine/vfp.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <machine/ofw_machdep.h>
#endif

#ifdef DEBUG
#define	debugf(fmt, args...) printf(fmt, ##args)
#else
#define	debugf(fmt, args...)
#endif

#if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7) || \
    defined(COMPAT_FREEBSD9)
#error FreeBSD/arm doesn't provide compatibility with releases prior to 10
#endif

#if __ARM_ARCH < 6
#error FreeBSD requires ARMv6 or later
#endif

struct pcpu __pcpu[MAXCPU];
struct pcpu *pcpup = &__pcpu[0];

static struct trapframe proc0_tf;
uint32_t cpu_reset_address = 0;
int cold = 1;
vm_offset_t vector_page;

/* The address at which the kernel was loaded.  Set early in initarm(). */
vm_paddr_t arm_physmem_kernaddr;

extern int *end;

#ifdef FDT
vm_paddr_t pmap_pa;
vm_offset_t systempage;
vm_offset_t irqstack;
vm_offset_t undstack;
vm_offset_t abtstack;
#endif /* FDT */

#ifdef PLATFORM
static delay_func *delay_impl;
static void *delay_arg;
#endif

struct kva_md_info kmi;
/*
 * arm_vector_init:
 *
 *	Initialize the vector page, and select whether or not to
 *	relocate the vectors.
 *
 *	NOTE: We expect the vector page to be mapped at its final
 *	destination.
 */

extern unsigned int page0[], page0_data[];
void
arm_vector_init(vm_offset_t va, int which)
{
	unsigned int *vectors = (unsigned int *)va;
	unsigned int *vectors_data = vectors + (page0_data - page0);
	int vec;

	/*
	 * Loop through the vectors we're taking over, and copy the
	 * vector's insn and data word.
	 */
	for (vec = 0; vec < ARM_NVEC; vec++) {
		if ((which & (1 << vec)) == 0) {
			/* Don't want to take over this vector. */
			continue;
		}
		vectors[vec] = page0[vec];
		vectors_data[vec] = page0_data[vec];
	}

	/* Now sync the vectors. */
	icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int));

	vector_page = va;
}

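/*
 * Finish machine-dependent startup once the VM system is up: identify the
 * CPU, initialize the kernel submaps, report the memory layout, set up the
 * buffer cache, and record the top of thread0's kernel (SVC) stack in its
 * PCB.  Runs as a SYSINIT at SI_SUB_CPU.
 */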
static void
cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;

	identify_arm_cpu();

	vm_ksubmap_init(&kmi);

	/*
	 * Display the RAM layout.
	 */
	printf("real memory  = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(realmem),
	    (uintmax_t)arm32_ptob(realmem) / mbyte);
	printf("avail memory = %ju (%ju MB)\n",
	    (uintmax_t)arm32_ptob(vm_free_count()),
	    (uintmax_t)arm32_ptob(vm_free_count()) / mbyte);
	if (bootverbose) {
		physmem_print_tables();
		devmap_print_table();
	}

	bufinit();
	vm_pager_bufferinit();
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len);
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	struct pcpu *pc;

	pc = pcpu_find(cpu_id);
	if (pc == NULL || rate == NULL)
		return (EINVAL);

	if (pc->pc_clock == 0)
		return (EOPNOTSUPP);

	*rate = pc->pc_clock;

	return (0);
}

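/*
 * Idle the current CPU: with interrupts held off, optionally stop the
 * periodic clock while idle and wait for the next interrupt via cpu_sleep().
 */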
void
cpu_idle(int busy)
{

	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu);
	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		cpu_sleep(0);
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
	CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu);
}

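/*
 * Nothing special is needed to wake an idle CPU here; returning 0 tells
 * the scheduler to fall back to its normal wakeup mechanism (an IPI).
 */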
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

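/*
 * Start the event timers.  The boot CPU does the full timer configuration;
 * application processors only start their per-CPU timers.
 */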
void
cpu_initclocks(void)
{

#ifdef SMP
	if (PCPU_GET(cpuid) == 0)
		cpu_initclocks_bsp();
	else
		cpu_initclocks_ap();
#else
	cpu_initclocks_bsp();
#endif
}

#ifdef PLATFORM
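/*
 * Register the platform's DELAY() implementation.  A platform timer driver
 * is expected to call this early in boot, for example
 * arm_set_delay(my_timer_delay, sc) (illustrative names), so that DELAY()
 * below has a callback to dispatch to.
 */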
void
arm_set_delay(delay_func *impl, void *arg)
{

	KASSERT(impl != NULL, ("No DELAY implementation"));
	delay_impl = impl;
	delay_arg = arg;
}

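/* Busy-wait for roughly usec microseconds using the registered callback. */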
void
DELAY(int usec)
{

	TSENTER();
	delay_impl(usec, delay_arg);
	TSEXIT();
}
#endif

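/*
 * Early per-CPU structure setup.  The MPIDR is initialized to an invalid
 * value here and filled in once the corresponding CPU starts.
 */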
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_mpidr = 0xffffffff;
}

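/*
 * Enter a spinlock section.  On the outermost entry, disable IRQs and FIQs,
 * save the previous interrupt state and enter a critical section; nested
 * calls only bump the count.
 */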
void
spinlock_enter(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		cspr = disable_interrupts(PSR_I | PSR_F);
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_cspr = cspr;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

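/*
 * Leave a spinlock section.  On the outermost exit, leave the critical
 * section and restore the interrupt state saved by spinlock_enter().
 */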
void
spinlock_exit(void)
{
	struct thread *td;
	register_t cspr;

	td = curthread;
	cspr = td->td_md.md_saved_cspr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		restore_interrupts(cspr);
	}
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
	pcb->pcb_regs.sf_r4 = tf->tf_r4;
	pcb->pcb_regs.sf_r5 = tf->tf_r5;
	pcb->pcb_regs.sf_r6 = tf->tf_r6;
	pcb->pcb_regs.sf_r7 = tf->tf_r7;
	pcb->pcb_regs.sf_r8 = tf->tf_r8;
	pcb->pcb_regs.sf_r9 = tf->tf_r9;
	pcb->pcb_regs.sf_r10 = tf->tf_r10;
	pcb->pcb_regs.sf_r11 = tf->tf_r11;
	pcb->pcb_regs.sf_r12 = tf->tf_r12;
	pcb->pcb_regs.sf_pc = tf->tf_pc;
	pcb->pcb_regs.sf_lr = tf->tf_usr_lr;
	pcb->pcb_regs.sf_sp = tf->tf_usr_sp;
}

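/*
 * Set up the boot CPU's per-CPU data and make thread0 the current thread
 * before the rest of the machine-dependent bootstrap runs.
 */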
void
pcpu0_init(void)
{
	set_curthread(&thread0);
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	pcpup->pc_mpidr = cp15_mpidr_get() & 0xFFFFFF;
	PCPU_SET(curthread, &thread0);
}

/*
 * Initialize proc0
 */
void
init_proc0(vm_offset_t kstack)
{
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = kstack_pages;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_pcb->pcb_vfpcpu = -1;
	thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN;
	thread0.td_pcb->pcb_vfpsaved = &thread0.td_pcb->pcb_vfpstate;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

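/*
 * Point the banked IRQ, abort and undefined-mode stack pointers of the given
 * CPU at the top of its slice of the corresponding stack area (ARM pushes
 * use pre-decrement, so r13 starts at the high end).
 */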
void
set_stackptrs(int cpu)
{

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_ABT32_MODE,
	    abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
	set_stackptr(PSR_UND32_MODE,
	    undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1)));
}

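/*
 * Initialize the kernel debugger and, if the boot flags request it, drop
 * into it immediately.
 */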
static void
arm_kdb_init(void)
{

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}

#ifdef FDT
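/*
 * Machine-dependent startup for FDT-based kernels, called from the early
 * assembly startup code.  This parses the boot parameters and device tree,
 * bootstraps the pmap and the console, sets up the per-mode stacks and the
 * vector page, and returns the value used as the initial stack pointer for
 * the rest of kernel startup.
 */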
void *
initarm(struct arm_boot_params *abp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	vm_paddr_t lastaddr;
	vm_offset_t dtbp, kernelstack, dpcpu;
	char *env;
	void *kmdp;
	int err_devmap, mem_regions_sz;
	phandle_t root;
	char dts_version[255];
#ifdef EFI
	struct efi_map_header *efihdr;
#endif

	/* get last allocated physical address */
	arm_physmem_kernaddr = abp->abp_physaddr;
	lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr;

	set_cpufuncs();
	cpuinfo_init();

	/*
	 * Find the dtb passed in by the boot loader.
	 */
	kmdp = preload_search_by_type("elf kernel");
	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");

#if defined(LINUX_BOOT_ABI)
	arm_parse_fdt_bootargs();
#endif

#ifdef EFI
	efihdr = (struct efi_map_header *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_MAP);
	if (efihdr != NULL) {
		arm_add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz);
	} else
#endif
	{
		/* Grab physical memory regions information from device tree. */
		if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0)
			panic("Cannot get physical memory regions");
	}
	physmem_hardware_regions(mem_regions, mem_regions_sz);

	/* Grab reserved memory regions information from device tree. */
	if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0)
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);

	/*
	 * Set TEX remapping registers.
	 * Set up kernel page tables and switch to the kernel L1 page table.
	 */
	pmap_set_tex();
	pmap_bootstrap_prepare(lastaddr);

	/*
	 * If EARLY_PRINTF support is enabled, we need to re-establish the
	 * mapping after pmap_bootstrap_prepare() switches to new page tables.
	 * Note that we can only do the remapping if the VA is outside the
	 * kernel, now that we have real virtual (not VA=PA) mappings in effect.
	 * Early printf does not work between the time pmap_set_tex() does
	 * cp15_prrr_set() and this code remaps the VA.
	 */
#if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
	pmap_preboot_map_attr(SOCDEV_PA, SOCDEV_VA, 1024 * 1024,
	    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
#endif

	/*
	 * Now that proper page tables are installed, call cpu_setup() to enable
	 * instruction and data caches and other chip-specific features.
	 */
	cpu_setup();

	/* Platform-specific initialisation */
	platform_probe_and_attach();
	pcpu0_init();

	/* Do basic tuning, hz etc */
	init_param1();

	/*
	 * Allocate a page for the system page mapped to 0xffff0000.
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	systempage = pmap_preboot_get_pages(1);

	/* Map the vector page. */
	pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1);
	if (virtual_end >= ARM_VECTORS_HIGH)
		virtual_end = ARM_VECTORS_HIGH - 1;

	/* Allocate dynamic per-cpu area. */
	dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate stacks for all modes */
	irqstack    = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU);
	abtstack    = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU);
	undstack    = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU);
	kernelstack = pmap_preboot_get_vpages(kstack_pages);

	/* Allocate message buffer. */
	msgbufp = (void *)pmap_preboot_get_vpages(
	    round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	set_stackptrs(0);
	mutex_init();

	/* Establish static device mappings. */
	err_devmap = platform_devmap_init();
	devmap_bootstrap(0, NULL);
	vm_max_kernel_address = platform_lastaddr();

	/*
	 * Only after the SOC registers block is mapped can we perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);
	platform_gpio_init();
	cninit();

	/*
	 * If we made a mapping for EARLY_PRINTF after pmap_bootstrap_prepare(),
	 * undo it now that the normal console printf works.
	 */
#if defined(EARLY_PRINTF) && defined(SOCDEV_PA) && defined(SOCDEV_VA) && SOCDEV_VA < KERNBASE
	pmap_kremove(SOCDEV_VA);
#endif

	debugf("initarm: console initialized\n");
	debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	debugf(" lastaddr1: 0x%08x\n", lastaddr);
	arm_print_kenv();

	env = kern_getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	if (err_devmap != 0)
		printf("WARNING: could not fully configure devmap, error=%d\n",
		    err_devmap);

	platform_late_init();

	root = OF_finddevice("/");
	if (OF_getprop(root, "freebsd,dts-version", dts_version, sizeof(dts_version)) > 0) {
		if (strcmp(LINUX_DTS_VERSION, dts_version) != 0)
			printf("WARNING: DTB version is %s while kernel expects %s, "
			    "please update the DTB in the ESP\n",
			    dts_version,
			    LINUX_DTS_VERSION);
	} else {
		printf("WARNING: Cannot find freebsd,dts-version property, "
		    "cannot check DTB compliance\n");
	}

	/*
	 * We must now clean the cache again.
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in cpu_setttb()
	 * but since we are bootstrapping, the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel, so
	 * this problem will not occur after initarm().
	 */
	/* Set stack for exception handlers */
	undefined_init();
	init_proc0(kernelstack);
	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
	enable_interrupts(PSR_A);
	pmap_bootstrap(0);

	/*
	 * Exclude the kernel (and all the things we allocated which immediately
	 * follow the kernel) from the VM allocation pool but not from crash
	 * dumps.  virtual_avail is a global variable which tracks the kva we've
	 * "allocated" while setting up pmaps.
	 *
	 * Prepare the list of physical memory available to the vm subsystem.
	 */
	physmem_exclude_region(abp->abp_physaddr,
	    pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC);
	physmem_init_kernel_globals();

	init_param2(physmem);
	/* Init message buffer. */
	msgbufinit(msgbufp, msgbufsize);
	dbg_monitor_init();
	arm_kdb_init();
	/* Apply possible BP hardening. */
	cpuinfo_init_bp_hardening();
	return ((void *)STACKALIGN(thread0.td_pcb));
}
#endif /* FDT */
633