/* minix/kernel/arch/earm/arch_system.c (revision 83133719) */

/* System-dependent functions for use inside the whole kernel. */

#include "kernel/kernel.h"

#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <minix/cpufeature.h>
#include <assert.h>
#include <signal.h>
#include <machine/vm.h>
#include <machine/signal.h>
#include <arm/armreg.h>

#include <minix/u64.h>

#include "archconst.h"
#include "arch_proto.h"
#include "kernel/proc.h"
#include "kernel/debug.h"
#include "ccnt.h"
#include "bsp_init.h"
#include "bsp_serial.h"

#include "glo.h"

void * k_stacks;

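/*
 * FPU context handling is a no-op on this port: the earm kernel does not
 * save or restore floating-point state, so the hooks below (and
 * restore_fpu() and fpu_sigcontext() further down) exist only to satisfy
 * the architecture-independent kernel interface.
 */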
void fpu_init(void)
{
}

void save_local_fpu(struct proc *pr, int retain)
{
}

void save_fpu(struct proc *pr)
{
}

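/*
 * INIT_TASK_PSR and INIT_PSR are defined in archconst.h; judging by their
 * use below (an inference, not stated in this file), the former presumably
 * selects a privileged processor mode for kernel tasks while the latter
 * selects user mode for ordinary processes.
 */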
void arch_proc_reset(struct proc *pr)
{
	assert(pr->p_nr < NR_PROCS);

	/* Clear process state. */
	memset(&pr->p_reg, 0, sizeof(pr->p_reg));
	if(iskerneln(pr->p_nr)) {
		pr->p_reg.psr = INIT_TASK_PSR;
	} else {
		pr->p_reg.psr = INIT_PSR;
	}
}

void arch_proc_setcontext(struct proc *p, struct stackframe_s *state,
	int isuser, int trapstyle)
{
	assert(sizeof(p->p_reg) == sizeof(*state));
	if(state != &p->p_reg) {
		memcpy(&p->p_reg, state, sizeof(*state));
	}

	/* Further code is instructed not to touch the context
	 * any more.
	 */
	p->p_misc_flags |= MF_CONTEXT_SET;

	if(!(p->p_rts_flags)) {
		printf("WARNING: setting full context of runnable process\n");
		print_proc(p);
		util_stacktrace();
	}
}

void arch_set_secondary_ipc_return(struct proc *p, u32_t val)
{
	p->p_reg.r1 = val;
}

int restore_fpu(struct proc *pr)
{
	return 0;
}

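/*
 * MIDR (Main ID Register) layout, per the ARMv7-A ARM: implementer
 * [31:24], variant [23:20], architecture [19:16], primary part number
 * [15:4], revision [3:0]. For example (illustrative, not from this file),
 * a Cortex-A8 reads back implementer 0x41 ('A', ARM Ltd.) and part
 * number 0xC08.
 */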
void cpu_identify(void)
{
	u32_t midr;
	unsigned cpu = cpuid;

	asm volatile("mrc p15, 0, %[midr], c0, c0, 0 @ read MIDR\n\t"
		     : [midr] "=r" (midr));

	cpu_info[cpu].implementer = midr >> 24;
	cpu_info[cpu].variant = (midr >> 20) & 0xF;
	cpu_info[cpu].arch = (midr >> 16) & 0xF;
	cpu_info[cpu].part = (midr >> 4) & 0xFFF;
	cpu_info[cpu].revision = midr & 0xF;
	cpu_info[cpu].freq = 660; /* 660 MHz, hardcoded */
}

void arch_init(void)
{
	u32_t value;

	k_stacks = (void*) &k_stacks_start;
	assert(!((vir_bytes) k_stacks % K_STACK_SIZE));

#ifndef CONFIG_SMP
	/*
	 * Use stack 0 and cpu id 0 on a single processor machine; the SMP
	 * configuration does this in smp_init() for all cpus at once.
	 */
	tss_init(0, get_k_stack_top(0));
#endif

	/* Enable user-space access to the cycle counter, in three steps. */

	/* Reset the cycle counter and enable the counter hardware:
	 * ARM ARM B4.1.113 and B4.1.117. */
	asm volatile ("MRC p15, 0, %0, c9, c12, 0" : "=r" (value));
	value |= PMU_PMCR_C; /* Reset cycle counter */
	value |= PMU_PMCR_E; /* Enable counter hardware */
	asm volatile ("MCR p15, 0, %0, c9, c12, 0" : : "r" (value));

	/* Enable CCNT counting: ARM ARM B4.1.116 */
	value = PMU_PMCNTENSET_C; /* Enable PMCCNTR cycle counter */
	asm volatile ("MCR p15, 0, %0, c9, c12, 1" : : "r" (value));

	/* Allow cycle counter reads from user mode: ARM ARM B4.1.124 */
	value = PMU_PMUSERENR_EN;
	asm volatile ("MCR p15, 0, %0, c9, c14, 0" : : "r" (value));

	bsp_init();
}
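
/*
 * With PMUSERENR.EN set in arch_init(), user-mode code can read the cycle
 * counter directly. A minimal sketch (illustrative, not part of this file):
 *
 *	u32_t ccnt;
 *	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));
 *
 * c9, c13, 0 selects PMCCNTR, the cycle count register.
 */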

/*===========================================================================*
 *				do_ser_debug				     *
 *===========================================================================*/
void do_ser_debug(void)
{
}

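/*
 * Calling convention assumed here (inferred from the code, not stated in
 * this file): the IPC call number arrives in retreg (r0), which also
 * carries the result back to the caller, while r1 and r2 hold the
 * remaining arguments that do_ipc() expects.
 */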
void arch_do_syscall(struct proc *proc)
{
	/* do_ipc assumes that it's running because of the current process */
	assert(proc == get_cpulocal_var(proc_ptr));
	/* Make the system call, for real this time. */
	proc->p_reg.retreg =
		do_ipc(proc->p_reg.retreg, proc->p_reg.r1, proc->p_reg.r2);
}

reg_t svc_stack;

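/*
 * The current process pointer is written to the top of the SVC stack
 * below; presumably the low-level exception entry code picks it up from
 * there, so it must be refreshed on every return to user space. That is
 * an inference from this code, not something this file states.
 */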
struct proc * arch_finish_switch_to_user(void)
{
	char * stk;
	struct proc * p;

#ifdef CONFIG_SMP
	stk = (char *)tss[cpuid].sp0;
#else
	stk = (char *)tss[0].sp0;
#endif
	svc_stack = (reg_t)stk;
	/* Store a pointer to the process to run at the top of the stack. */
	p = get_cpulocal_var(proc_ptr);
	*((reg_t *)stk) = (reg_t) p;

	/* Enable interrupts (clear the IRQ and FIQ masks) in the saved PSR,
	 * so they are on when the process resumes. */
	p->p_reg.psr &= ~(PSR_I|PSR_F);

	return p;
}

void fpu_sigcontext(struct proc *pr, struct sigframe_sigcontext *fr, struct sigcontext *sc)
{
}

reg_t arch_get_sp(struct proc *p) { return p->p_reg.sp; }

void get_randomness(struct k_randomness *rand, int source)
{
	/* No architecture-specific entropy is collected on this port. */
}

void arch_ser_init(void)
{
	bsp_ser_init();
}

/*===========================================================================*/
/*			      __switch_address_space			     */
/*===========================================================================*/
/*
 * Sets the ttbr register to the supplied value, unless it already holds
 * that value; reloading it with the same value would only cause an
 * unnecessary TLB flush.
 */
void __switch_address_space(struct proc *p, struct proc **__ptproc)
{
	reg_t orig_ttbr, new_ttbr;

	new_ttbr = p->p_seg.p_ttbr;
	if (new_ttbr == 0)
		return;

	orig_ttbr = read_ttbr0();

	/*
	 * Test whether ttbr already holds the new value, to avoid an
	 * unnecessary TLB flush.
	 */
	if (new_ttbr == orig_ttbr)
		return;

	write_ttbr0(new_ttbr);

	*__ptproc = p;
}
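
/*
 * A sketch of the expected call site (hypothetical, assuming MINIX's
 * get_cpulocal_var_ptr() accessor; the wrapper is not defined in this
 * file):
 *
 *	__switch_address_space(p, get_cpulocal_var_ptr(ptproc));
 *
 * Passing the CPU-local ptproc slot as __ptproc keeps the kernel's record
 * of the currently loaded address space in sync with ttbr0.
 */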
219