/*	$NetBSD: machdep.c,v 1.9 2023/04/20 08:28:03 skrll Exp $	*/
/*
 * Copyright (c) 2012, 2013 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.9 2023/04/20 08:28:03 skrll Exp $");

#include "clpscom.h"
#include "clpslcd.h"
#include "wmcom.h"
#include "wmlcd.h"
#include "epockbd.h"
#include "ksyms.h"
#include "opt_ddb.h"
#include "opt_md.h"
#include "opt_modular.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lwp.h>
#include <sys/pmf.h>
#include <sys/reboot.h>
#include <sys/termios.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>
#include <dev/md.h>

#include <arm/locore.h>
#include <arm/undefined.h>
#include <arm/arm32/machdep.h>
#include <arm/arm32/pmap.h>

#include <machine/bootconfig.h>
#include <machine/bootinfo.h>
#include <machine/epoc32.h>

#include <arm/clps711x/clpssocvar.h>
#include <epoc32/windermere/windermerevar.h>
#include <epoc32/windermere/windermerereg.h>
#include <epoc32/dev/epockbdvar.h>

#include <machine/db_machdep.h>
#include <ddb/db_extern.h>

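/*
 * Kernel layout: the kernel image is loaded KERNEL_OFFSET bytes into the
 * first DRAM bank and runs virtually at KERNEL_TEXT_BASE.  The range from
 * KERNEL_VM_BASE to KERNEL_VM_BASE + KERNEL_VM_SIZE is the managed kernel
 * virtual address space handed to pmap_bootstrap() in initarm().
 */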
#define KERNEL_OFFSET		0x00030000
#define KERNEL_TEXT_BASE	(KERNEL_BASE + KERNEL_OFFSET)
#ifndef KERNEL_VM_BASE
#define KERNEL_VM_BASE		(KERNEL_BASE + 0x00300000)
#endif
#define KERNEL_VM_SIZE		0x04000000	/* XXXX 64M */

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#define UND_STACK_SIZE	1


BootConfig bootconfig;		/* Boot config storage */
static char bootargs[256];
char *boot_args = NULL;

vaddr_t physical_start;
vaddr_t physical_freestart;
vaddr_t physical_freeend;
vaddr_t physical_end;
u_int free_pages;

paddr_t msgbufphys;

enum {
	KERNEL_PT_SYS = 0,	/* Page table for mapping proc0 zero page */
	KERNEL_PT_KERNEL,	/* Page table for mapping kernel and VM */

	NUM_KERNEL_PTS
};
pv_addr_t kernel_pt_table[NUM_KERNEL_PTS];

char epoc32_model[256];
int epoc32_fb_width;
int epoc32_fb_height;
int epoc32_fb_addr;

/*
 * Static device mappings.  These peripheral registers are mapped at
 * fixed virtual addresses very early in initarm() so that we can use
 * them while booting the kernel, and they stay at the same addresses
 * throughout the kernel's lifetime.
 *
 * We use this table twice: once with the bootstrap page table, and
 * once with the kernel's page table, which we build up in initarm().
 *
 * Since we map these registers into the bootstrap page table using
 * pmap_devmap_bootstrap(), which calls pmap_map_chunk(), we map the
 * registers segment-aligned and segment-rounded so that no 2nd-level
 * page tables are needed.
 */

static const struct pmap_devmap epoc32_devmap[] = {
	DEVMAP_ENTRY(
		ARM7XX_INTRREG_VBASE,		/* includes com, lcd-ctrl */
		ARM7XX_INTRREG_BASE,
		ARM7XX_INTRREG_SIZE
	),

	DEVMAP_ENTRY_END
};
static const struct pmap_devmap epoc32_fb_devmap[] = {
	DEVMAP_ENTRY(
		ARM7XX_FB_VBASE,
		ARM7XX_FB_BASE,
		ARM7XX_FB_SIZE
	),

	DEVMAP_ENTRY_END
};

/*
 * vaddr_t initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 */
vaddr_t
initarm(void *arg)
{
	extern char _end[];
	extern vaddr_t startup_pagetable;
	extern struct btinfo_common bootinfo;
	struct btinfo_common *btinfo = &bootinfo;
	struct btinfo_model *model = NULL;
	struct btinfo_memory *memory = NULL;
	struct btinfo_video *video = NULL;
	struct btinfo_bootargs *args = NULL;
	u_int l1pagetable, _end_physical;
	int loop, loop1, n, i;

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	/* map some peripheral registers at static I/O area. */
	pmap_devmap_bootstrap(startup_pagetable, epoc32_devmap);

	bootconfig.dramblocks = 0;
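	/*
	 * Walk the bootinfo records passed in by the bootloader.  Known
	 * record types advance the cursor by their structure size; anything
	 * else is skipped using the record's length field (NEXT_BOOTINFO).
	 */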
	while (btinfo->type != BTINFO_NONE) {
		switch (btinfo->type) {
		case BTINFO_MODEL:
			model = (struct btinfo_model *)btinfo;
			btinfo = &(model + 1)->common;
			strncpy(epoc32_model, model->model,
			    sizeof(epoc32_model));
			break;

		case BTINFO_MEMORY:
			memory = (struct btinfo_memory *)btinfo;
			btinfo = &(memory + 1)->common;

			/*
			 * Fake bootconfig structure for the benefit of pmap.c
			 */
			i = bootconfig.dramblocks;
			bootconfig.dram[i].address = memory->address;
			bootconfig.dram[i].pages = memory->size / PAGE_SIZE;
			bootconfig.dramblocks++;
			break;

		case BTINFO_VIDEO:
			video = (struct btinfo_video *)btinfo;
			btinfo = &(video + 1)->common;
			epoc32_fb_width = video->width;
			epoc32_fb_height = video->height;
			break;

		case BTINFO_BOOTARGS:
			args = (struct btinfo_bootargs *)btinfo;
			btinfo = &(args + 1)->common;
			memcpy(bootargs, args->bootargs,
			    uimin(sizeof(bootargs), sizeof(args->bootargs)));
			bootargs[sizeof(bootargs) - 1] = '\0';
			boot_args = bootargs;
			break;

		default:
#define NEXT_BOOTINFO(bi) (struct btinfo_common *)((char *)bi + (bi)->len)

			btinfo = NEXT_BOOTINFO(btinfo);
		}
	}
	if (bootconfig.dramblocks == 0)
		panic("BTINFO_MEMORY not found");

	consinit();

	if (boot_args != NULL)
		parse_mi_bootargs(boot_args);

	physical_start = bootconfig.dram[0].address;
	physical_freestart = bootconfig.dram[0].address;
	physical_freeend = KERNEL_TEXT_BASE;

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

	/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

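	/*
	 * Carve the bootstrap pages downwards from just below the kernel
	 * text.  Keep allocating page-table-sized chunks until one lands on
	 * a 16KB boundary suitable for the L1 table; the remaining chunks
	 * become the L2 page tables.
	 */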
	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never happen, but check just in case. */
	if (!kernel_l1pt.pv_pa ||
	    (kernel_l1pt.pv_pa & (L1_TABLE_SIZE - 1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/* Map the L2 page tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, KERNEL_BASE,
	    &kernel_pt_table[KERNEL_PT_KERNEL]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr = KERNEL_VM_BASE;

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		extern char etext[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		size_t datasize;
		PhysMem *dram = bootconfig.dram;
		u_int logical, physical, size;

		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
		datasize = totalsize - textsize;	/* data and bss */

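		/*
		 * The kernel image may span more than one DRAM bank, so map
		 * the text and then the data/bss chunk by chunk, hopping to
		 * the next bank whenever the current one runs out.
		 */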
		logical = KERNEL_OFFSET;	/* offset of kernel in RAM */
		physical = KERNEL_OFFSET;
		i = 0;
		size = dram[i].pages * PAGE_SIZE - physical;
		/* Map kernel text section. */
		while (1 /*CONSTINT*/) {
			size = pmap_map_chunk(l1pagetable,
			    KERNEL_BASE + logical, dram[i].address + physical,
			    textsize < size ? textsize : size,
			    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
			logical += size;
			physical += size;
			textsize -= size;
			if (physical >= dram[i].pages * PAGE_SIZE) {
				i++;
				size = dram[i].pages * PAGE_SIZE;
				physical = 0;
			}
			if (textsize == 0)
				break;
		}
		size = dram[i].pages * PAGE_SIZE - physical;
		/* Map data and bss section. */
		while (1 /*CONSTINT*/) {
			size = pmap_map_chunk(l1pagetable,
			    KERNEL_BASE + logical, dram[i].address + physical,
			    datasize < size ? datasize : size,
			    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
			logical += size;
			physical += size;
			datasize -= size;
			if (physical >= dram[i].pages * PAGE_SIZE) {
				i++;
				size = dram[i].pages * PAGE_SIZE;
				physical = 0;
			}
			if (datasize == 0)
				break;
		}
		_end_physical = dram[i].address + physical;
		n = i;
		physical_end = dram[n].address + dram[n].pages * PAGE_SIZE;
		n++;
	}
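	/*
	 * _end_physical now points just past the loaded kernel image,
	 * banks 0 .. n-1 are the ones it occupies, and physical_end marks
	 * the end of the last of those banks.
	 */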

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop)
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	pmap_devmap_bootstrap(l1pagetable, epoc32_devmap);
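	/*
	 * Map the framebuffer at its fixed virtual address as well and
	 * record that address, presumably for the display drivers.
	 */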
	pmap_devmap_bootstrap(l1pagetable, epoc32_fb_devmap);
	epoc32_fb_addr = ARM7XX_FB_VBASE;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/* Switch tables */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Set the abort and undefined-instruction handlers.
	 * Once things get going these will change, as we will need proper
	 * handlers.  Until then we use handlers that just panic but tell
	 * us why.  The vector initialisation above would just panic on a
	 * data abort; this fills in slightly better handlers.
	 */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
	undefined_init();

	/* Load memory into UVM. */
	uvm_md_init();
	uvm_page_physload(
	    atop(_end_physical), atop(physical_end),
	    atop(_end_physical), atop(physical_end),
	    VM_FREELIST_DEFAULT);
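	/*
	 * physmem counts the pages in the banks already holding the kernel
	 * image; the remaining DRAM banks are handed to UVM in the loop
	 * below.
	 */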
	physmem = bootconfig.dram[0].pages;
	for (i = 1; i < n; i++)
		physmem += bootconfig.dram[i].pages;
	if (physmem < 0x400000)
		physical_end = 0;
	for (loop = n; loop < bootconfig.dramblocks; loop++) {
		size_t start = bootconfig.dram[loop].address;
		size_t size = bootconfig.dram[loop].pages * PAGE_SIZE;

		uvm_page_physload(atop(start), atop(start + size),
		    atop(start), atop(start + size), VM_FREELIST_DEFAULT);
		physmem += bootconfig.dram[loop].pages;

		if (physical_end == 0 && physmem >= 0x400000 / PAGE_SIZE)
			/* Fixup physical_end for Series5. */
			physical_end = start + size;
	}

	/* Boot strap pmap telling it where managed kernel virtual memory is */
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef __HAVE_MEMORY_DISK__
	md_root_setconf(memory_disk, sizeof memory_disk);
#endif

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* Firmware doesn't load symbols. */
	ddb_init(0, NULL, NULL);
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return kernelstack.pv_va + USPACE_SVC_STACK_TOP;
}

void
cpu_reboot(int howto, char *bootstr)
{

#ifdef DIAGNOSTIC
	/* info */
	printf("boot: howto=%08x curproc=%p\n", howto, curproc);
#endif

	/*
	 * If we are still cold then hit the air brakes
	 * and crash to earth fast
	 */
	if (cold) {
		doshutdownhooks();
		pmf_system_shutdown(boothowto);
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
		printf("rebooting...\n");
		cpu_reset();
		/*NOTREACHED*/
	}

	/*
	 * If RB_NOSYNC was not specified sync the discs.
	 * Note: Unless cold is set to 1 here, syslogd will die during the
	 * unmount.  It looks like syslogd is getting woken up only to find
	 * that it cannot page part of the binary in as the filesystem has
	 * been unmounted.
	 */
	if (!(howto & RB_NOSYNC))
		bootsync();

	/* Say NO to interrupts */
	splhigh();

	/* Do a dump if requested. */
	if ((howto & (RB_DUMP | RB_HALT)) == RB_DUMP)
		dumpsys();

	/* Run any shutdown hooks */
	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	/* Make sure IRQ's are disabled */
	IRQdisable;

	if (howto & RB_HALT) {
		printf("The operating system has halted.\n");
		printf("Please press any key to reboot.\n\n");
		cngetc();
	}

	printf("rebooting...\n");
	cpu_reset();
	/*NOTREACHED*/
}

void
consinit(void)
{
	static int consinit_called = 0;
#if (NWMCOM + NCLPSCOM) > 0
	const tcflag_t mode = (TTYDEF_CFLAG & ~(CSIZE | CSTOPB | PARENB)) | CS8;
#endif

	if (consinit_called)
		return;
	consinit_called = 1;

	if (strcmp(epoc32_model, "SERIES5 R1") == 0) {
#if NCLPSLCD > 0
		if (clpslcd_cnattach() == 0) {
#if NEPOCKBD > 0
			epockbd_cnattach();
#endif
			return;
		}
#endif
#if NCLPSCOM > 0
		if (clpscom_cnattach(ARM7XX_INTRREG_VBASE, 115200, mode) == 0)
			return;
#endif
	}
	if (strcmp(epoc32_model, "SERIES5mx") == 0) {
		vaddr_t vbase = ARM7XX_INTRREG_VBASE;
#if NWMCOM > 0
		vaddr_t offset;
		volatile uint8_t *gpio;
		int irda;
#endif

#if NWMLCD > 0
		if (wmlcd_cnattach() == 0) {
#if NEPOCKBD > 0
			epockbd_cnattach();
#endif
			return;
		}
#endif
#if NWMCOM > 0
		gpio = (uint8_t *)ARM7XX_INTRREG_VBASE + WINDERMERE_GPIO_OFFSET;
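		/*
		 * The disabled branch would put the console on UART0, which
		 * drives the IrDA port; as written the console attaches to
		 * UART1 as a plain UART.
		 */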
		if (0) {
			/* Enable UART0 to PCDR */
			*(gpio + 0x08) |= 1 << 5;
			offset = WINDERMERE_COM0_OFFSET;
			irda = 1;			/* IrDA */
		} else {
			/* Enable UART1 to PCDR */
			*(gpio + 0x08) |= 1 << 3;
			offset = WINDERMERE_COM1_OFFSET;
			irda = 0;			/* UART */
		}

		if (wmcom_cnattach(vbase + offset, 115200, mode, irda) == 0)
			return;
#endif
	}
	if (strcmp(epoc32_model, "SERIES7") == 0) {
	}
	panic("can't init console");
}
606