xref: /illumos-gate/usr/src/uts/sun4v/cpu/niagara.c (revision f48205be)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <vm/hat_sfmmu.h>
#include <vm/page.h>
#include <sys/cpuvar.h>
#include <sys/async.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/dditypes.h>
#include <sys/sunddi.h>
#include <sys/cpu_module.h>
#include <sys/prom_debug.h>
#include <sys/vmsystm.h>
#include <sys/prom_plat.h>
#include <sys/sysmacros.h>
#include <sys/intreg.h>
#include <sys/machtrap.h>
#include <sys/ontrap.h>
#include <sys/ivintr.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/dtrace.h>
#include <sys/simulate.h>
#include <sys/fault.h>
#include <sys/niagararegs.h>
#include <sys/trapstat.h>
#include <sys/hsvc.h>

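/*
 * Page sizes supported by the Niagara MMU: 8K, 64K, 4M and 256M.
 */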
#define	NI_MMU_PAGESIZE_MASK	((1 << TTE8K) | (1 << TTE64K) | (1 << TTE4M) \
				    | (1 << TTE256M))

uint_t root_phys_addr_lo_mask = 0xffffffffU;
static niagara_mmustat_t *cpu_tstat_va;		/* VA of mmustat buffer */
static uint64_t cpu_tstat_pa;			/* PA of mmustat buffer */
char cpu_module_name[] = "SUNW,UltraSPARC-T1";

/*
 * Hypervisor services information for the NIAGARA CPU module
 */
static boolean_t niagara_hsvc_available = B_TRUE;
static uint64_t niagara_sup_minor;		/* Supported minor number */
static hsvc_info_t niagara_hsvc = {
	HSVC_REV_1, NULL, HSVC_GROUP_NIAGARA_CPU, 1, 0, cpu_module_name
};

void
cpu_setup(void)
{
	extern int mmu_exported_pagesize_mask;
	extern int cpc_has_overflow_intr;
	int status;
	char *ni_isa_set[] = {
	    "sparcv9+vis",
	    "sparcv9+vis2",
	    "sparcv8plus+vis",
	    "sparcv8plus+vis2",
	    NULL
	};

	/*
	 * Negotiate the API version for Niagara specific hypervisor
	 * services.
	 */
	status = hsvc_register(&niagara_hsvc, &niagara_sup_minor);
	if (status != 0) {
		cmn_err(CE_WARN, "%s: cannot negotiate hypervisor services "
		    "group: 0x%lx major: 0x%lx minor: 0x%lx errno: %d\n",
		    niagara_hsvc.hsvc_modname, niagara_hsvc.hsvc_group,
		    niagara_hsvc.hsvc_major, niagara_hsvc.hsvc_minor, status);
		niagara_hsvc_available = B_FALSE;
	}

	/*
	 * The setup common to all CPU modules is done in the
	 * cpu_setup_common() routine.
	 */
	cpu_setup_common(ni_isa_set);

	cache |= (CACHE_PTAG | CACHE_IOCOHERENT);

	if (broken_md_flag) {
		/*
		 * Turn on any bits for page sizes supported by the Niagara
		 * CPU that are missing from the MMU pagesize mask returned
		 * by the MD.
		 */
		mmu_exported_pagesize_mask |= NI_MMU_PAGESIZE_MASK;
	} else {
		if ((mmu_exported_pagesize_mask &
		    DEFAULT_SUN4V_MMU_PAGESIZE_MASK) !=
		    DEFAULT_SUN4V_MMU_PAGESIZE_MASK)
			cmn_err(CE_PANIC, "machine description"
			    " does not have required sun4v page sizes"
			    " 8K, 64K and 4M: MD mask is 0x%x",
			    mmu_exported_pagesize_mask);
	}

	cpu_hwcap_flags |= AV_SPARC_ASI_BLK_INIT;

	/*
	 * Niagara supports a 48-bit subset of the full 64-bit virtual
	 * address space. Virtual addresses between 0x0000.8000.0000.0000
	 * and 0xffff.7fff.ffff.ffff inclusive lie within a "VA Hole"
	 * and must never be mapped. In addition, software must not use
	 * pages within 4GB of the VA hole as instruction pages to
	 * avoid problems with prefetching into the VA hole.
	 */
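	/*
	 * Widen the architectural hole by 4GB on each side so that the
	 * guard pages described above are never mapped.
	 */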
	hole_start = (caddr_t)((1ull << (va_bits - 1)) - (1ull << 32));
	hole_end = (caddr_t)((0ull - (1ull << (va_bits - 1))) + (1ull << 32));

	/*
	 * Niagara has a performance counter overflow interrupt
	 */
	cpc_has_overflow_intr = 1;
}

#define	MB(n)	((n) * 1024 * 1024)
/*
 * Set the magic constants of the implementation.
 */
void
cpu_fiximp(struct cpu_node *cpunode)
{
	/*
	 * The "Cache" node is optional in the MD. If it is absent, fall
	 * back to the default L2 cache size, linesize and associativity.
	 */
	if (cpunode->ecache_size == 0)
		cpunode->ecache_size = MB(3);
	if (cpunode->ecache_linesize == 0)
		cpunode->ecache_linesize = 64;
	if (cpunode->ecache_associativity == 0)
		cpunode->ecache_associativity = 12;
}

void
cpu_map_exec_units(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * The cpu_ipipe and cpu_fpu fields are initialized based on
	 * the execution unit sharing information from the MD. They
	 * default to the CPU id in the absence of such information.
	 */
	cp->cpu_m.cpu_ipipe = cpunodes[cp->cpu_id].exec_unit_mapping;
	if (cp->cpu_m.cpu_ipipe == NO_EU_MAPPING_FOUND)
		cp->cpu_m.cpu_ipipe = (id_t)(cp->cpu_id);

	cp->cpu_m.cpu_fpu = cpunodes[cp->cpu_id].fpu_mapping;
	if (cp->cpu_m.cpu_fpu == NO_EU_MAPPING_FOUND)
		cp->cpu_m.cpu_fpu = (id_t)(cp->cpu_id);

	/*
	 * Niagara defines the core to be at the ipipe level.
	 */
	cp->cpu_m.cpu_core = cp->cpu_m.cpu_ipipe;
}

static int niagara_cpucnt;

void
cpu_init_private(struct cpu *cp)
{
	extern void niagara_kstat_init(void);

	ASSERT(MUTEX_HELD(&cpu_lock));

	cpu_map_exec_units(cp);

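	/*
	 * Create the Niagara-specific kstats once, when the first CPU is
	 * initialized and the hypervisor API group was negotiated.
	 */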
	if ((niagara_cpucnt++ == 0) && (niagara_hsvc_available == B_TRUE))
		niagara_kstat_init();
}

/*ARGSUSED*/
void
cpu_uninit_private(struct cpu *cp)
{
	extern void niagara_kstat_fini(void);

	ASSERT(MUTEX_HELD(&cpu_lock));

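	/*
	 * Tear down the Niagara-specific kstats when the last CPU is
	 * uninitialized.
	 */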
	if ((--niagara_cpucnt == 0) && (niagara_hsvc_available == B_TRUE))
		niagara_kstat_fini();
}

/*
 * On Niagara, any flush will cause all preceding stores to be
 * synchronized wrt the i$, regardless of address or ASI.  In fact,
 * the address is ignored, so we always flush address 0.
 */
void
dtrace_flush_sec(uintptr_t addr)
{
	doflush(0);
}

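/*
 * Instruction decode helpers used to recognize VIS and partial/short
 * floating-point load/store instructions that must be simulated.
 */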
#define	IS_FLOAT(i) (((i) & 0x1000000) != 0)
#define	IS_IBIT_SET(x)	(x & 0x2000)
#define	IS_VIS1(op, op3)	(op == 2 && op3 == 0x36)
#define	IS_PARTIAL_OR_SHORT_FLOAT_LD_ST(op, op3, asi)		\
		(op == 3 && (op3 == IOP_V8_LDDFA ||		\
		op3 == IOP_V8_STDFA) && asi > ASI_SNFL)
int
vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault)
{
	char *badaddr;
	int instr;
	uint_t	optype, op3, asi;
	uint_t	rd, ignor;

	if (!USERMODE(rp->r_tstate))
		return (-1);

	instr = fetch_user_instr((caddr_t)rp->r_pc);

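	/*
	 * Decode the instruction fields: destination register, format
	 * (op), op3 and the ASI, which comes from the %asi register
	 * (via %tstate) when the i bit is set and from the immediate
	 * ASI field otherwise.
	 */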
	rd = (instr >> 25) & 0x1f;
	optype = (instr >> 30) & 0x3;
	op3 = (instr >> 19) & 0x3f;
	ignor = (instr >> 5) & 0xff;
	if (IS_IBIT_SET(instr)) {
		asi = (uint32_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	} else {
		asi = ignor;
	}

	if (!IS_VIS1(optype, op3) &&
	    !IS_PARTIAL_OR_SHORT_FLOAT_LD_ST(optype, op3, asi)) {
		return (-1);
	}
	switch (simulate_unimp(rp, &badaddr)) {
	case SIMU_RETRY:
		break;	/* regs are already set up */
		/*NOTREACHED*/

	case SIMU_SUCCESS:
		/*
		 * skip the successfully
		 * simulated instruction
		 */
		rp->r_pc = rp->r_npc;
		rp->r_npc += 4;
		break;
		/*NOTREACHED*/

	case SIMU_FAULT:
		siginfo->si_signo = SIGSEGV;
		siginfo->si_code = SEGV_MAPERR;
		siginfo->si_addr = badaddr;
		*fault = FLTBOUNDS;
		break;

	case SIMU_DZERO:
		siginfo->si_signo = SIGFPE;
		siginfo->si_code = FPE_INTDIV;
		siginfo->si_addr = (caddr_t)rp->r_pc;
		*fault = FLTIZDIV;
		break;

	case SIMU_UNALIGN:
		siginfo->si_signo = SIGBUS;
		siginfo->si_code = BUS_ADRALN;
		siginfo->si_addr = badaddr;
		*fault = FLTACCESS;
		break;

	case SIMU_ILLEGAL:
	default:
		siginfo->si_signo = SIGILL;
		op3 = (instr >> 19) & 0x3F;
		if (IS_FLOAT(instr) && (op3 == IOP_V8_STQFA ||
		    op3 == IOP_V8_STDFA))
			siginfo->si_code = ILL_ILLADR;
		else
			siginfo->si_code = ILL_ILLOPC;
		siginfo->si_addr = (caddr_t)rp->r_pc;
		*fault = FLTILL;
		break;
	}
	return (0);
}

/*
 * Trapstat support for Niagara processor
 */
int
cpu_trapstat_conf(int cmd)
{
	size_t len;
	uint64_t mmustat_pa, hvret;
	int status = 0;

	if (niagara_hsvc_available == B_FALSE)
		return (ENOTSUP);

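	/*
	 * The mmustat buffer holds NCPU + 1 niagara_mmustat_t entries;
	 * each CPU hands the hypervisor the physical address of entry
	 * (cpu_id + 1) when trapstat is enabled.
	 */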
	switch (cmd) {
	case CPU_TSTATCONF_INIT:
		ASSERT(cpu_tstat_va == NULL);
		len = (NCPU+1) * sizeof (niagara_mmustat_t);
		cpu_tstat_va = contig_mem_alloc_align(len,
		    sizeof (niagara_mmustat_t));
		if (cpu_tstat_va == NULL)
			status = EAGAIN;
		else {
			bzero(cpu_tstat_va, len);
			cpu_tstat_pa = va_to_pa(cpu_tstat_va);
		}
		break;

	case CPU_TSTATCONF_FINI:
		if (cpu_tstat_va) {
			len = (NCPU+1) * sizeof (niagara_mmustat_t);
			contig_mem_free(cpu_tstat_va, len);
			cpu_tstat_va = NULL;
			cpu_tstat_pa = 0;
		}
		break;

	case CPU_TSTATCONF_ENABLE:
		hvret = hv_niagara_mmustat_conf((cpu_tstat_pa +
		    (CPU->cpu_id+1) * sizeof (niagara_mmustat_t)),
		    (uint64_t *)&mmustat_pa);
		if (hvret != H_EOK)
			status = EINVAL;
		break;

	case CPU_TSTATCONF_DISABLE:
		hvret = hv_niagara_mmustat_conf(0, (uint64_t *)&mmustat_pa);
		if (hvret != H_EOK)
			status = EINVAL;
		break;

	default:
		status = EINVAL;
		break;
	}
	return (status);
}

void
cpu_trapstat_data(void *buf, uint_t tstat_pgszs)
{
	niagara_mmustat_t	*mmustatp;
	tstat_pgszdata_t	*tstatp = (tstat_pgszdata_t *)buf;
	int	i, pgcnt;

	if (cpu_tstat_va == NULL)
		return;

	mmustatp = &((niagara_mmustat_t *)cpu_tstat_va)[CPU->cpu_id+1];
	if (tstat_pgszs > NIAGARA_MMUSTAT_PGSZS)
		tstat_pgszs = NIAGARA_MMUSTAT_PGSZS;

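	/*
	 * Copy the per-page-size TSB hit counts and times collected by
	 * the hypervisor into the trapstat TLB-miss fields, for kernel
	 * and user, ITLB and DTLB.
	 */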
	for (i = 0; i < tstat_pgszs; i++, tstatp++) {
		tstatp->tpgsz_kernel.tmode_itlb.ttlb_tlb.tmiss_count =
		    mmustatp->kitsb[i].tsbhit_count;
		tstatp->tpgsz_kernel.tmode_itlb.ttlb_tlb.tmiss_time =
		    mmustatp->kitsb[i].tsbhit_time;
		tstatp->tpgsz_user.tmode_itlb.ttlb_tlb.tmiss_count =
		    mmustatp->uitsb[i].tsbhit_count;
		tstatp->tpgsz_user.tmode_itlb.ttlb_tlb.tmiss_time =
		    mmustatp->uitsb[i].tsbhit_time;
		tstatp->tpgsz_kernel.tmode_dtlb.ttlb_tlb.tmiss_count =
		    mmustatp->kdtsb[i].tsbhit_count;
		tstatp->tpgsz_kernel.tmode_dtlb.ttlb_tlb.tmiss_time =
		    mmustatp->kdtsb[i].tsbhit_time;
		tstatp->tpgsz_user.tmode_dtlb.ttlb_tlb.tmiss_count =
		    mmustatp->udtsb[i].tsbhit_count;
		tstatp->tpgsz_user.tmode_dtlb.ttlb_tlb.tmiss_time =
		    mmustatp->udtsb[i].tsbhit_time;
	}
}
407