xref: /freebsd/sys/amd64/amd64/mp_machdep.c (revision bdd1243d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 1996, by Steve Passe
5  * Copyright (c) 2003, by Peter Wemm
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. The name of the developer may NOT be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_acpi.h"
33 #include "opt_cpu.h"
34 #include "opt_ddb.h"
35 #include "opt_kstack_pages.h"
36 #include "opt_sched.h"
37 #include "opt_smp.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/cpuset.h>
43 #include <sys/domainset.h>
44 #include <sys/kdb.h>
45 #include <sys/kernel.h>
46 #include <sys/ktr.h>
47 #include <sys/lock.h>
48 #include <sys/malloc.h>
49 #include <sys/memrange.h>
50 #include <sys/mutex.h>
51 #include <sys/pcpu.h>
52 #include <sys/proc.h>
53 #include <sys/sched.h>
54 #include <sys/smp.h>
55 #include <sys/sysctl.h>
56 
57 #include <vm/vm.h>
58 #include <vm/vm_param.h>
59 #include <vm/pmap.h>
60 #include <vm/vm_kern.h>
61 #include <vm/vm_extern.h>
62 #include <vm/vm_page.h>
63 #include <vm/vm_phys.h>
64 
65 #include <x86/apicreg.h>
66 #include <machine/clock.h>
67 #include <machine/cputypes.h>
68 #include <machine/cpufunc.h>
69 #include <x86/mca.h>
70 #include <machine/md_var.h>
71 #include <machine/pcb.h>
72 #include <machine/psl.h>
73 #include <machine/smp.h>
74 #include <machine/specialreg.h>
75 #include <machine/tss.h>
76 #include <x86/ucode.h>
77 #include <machine/cpu.h>
78 #include <x86/init.h>
79 
80 #ifdef DEV_ACPI
81 #include <contrib/dev/acpica/include/acpi.h>
82 #include <dev/acpica/acpivar.h>
83 #endif
84 
85 #define WARMBOOT_TARGET		0
86 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
87 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
88 
89 #define CMOS_REG		(0x70)
90 #define CMOS_DATA		(0x71)
91 #define BIOS_RESET		(0x0f)
92 #define BIOS_WARM		(0x0a)
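/*
 * These definitions implement the classic PC warm-start protocol: the
 * BIOS data area word at 0x467 holds a real-mode far pointer to the
 * resume code, and writing BIOS_WARM into the CMOS reset status byte
 * (BIOS_RESET) makes a CPU released from INIT jump through that vector
 * instead of running the normal POST path.  start_all_aps() points the
 * vector at the AP trampoline on non-EFI boots.
 */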
93 
94 #define GiB(v)			(v ## ULL << 30)
95 
96 #define	AP_BOOTPT_SZ		(PAGE_SIZE * 4)
97 
98 /* Temporary variables for init_secondary()  */
99 static char *doublefault_stack;
100 static char *mce_stack;
101 static char *nmi_stack;
102 static char *dbg_stack;
103 void *bootpcpu;
104 
105 extern u_int mptramp_la57;
106 extern u_int mptramp_nx;
107 
108 /*
109  * Local data and functions.
110  */
111 
112 static int start_ap(int apic_id, vm_paddr_t boot_address);
113 
114 /*
115  * Initialize the IPI handlers and start up the APs.
116  */
117 void
118 cpu_mp_start(void)
119 {
120 	int i;
121 
122 	/* Initialize the logical ID to APIC ID table. */
123 	for (i = 0; i < MAXCPU; i++) {
124 		cpu_apic_ids[i] = -1;
125 	}
126 
127 	/* Install an inter-CPU IPI for cache and TLB invalidations. */
128 	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
129 	    SDT_SYSIGT, SEL_KPL, 0);
130 
131 	/* Install an inter-CPU IPI for all-CPU rendezvous */
132 	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
133 	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
134 
135 	/* Install a generic inter-CPU IPI handler */
136 	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
137 	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);
138 
139 	/* Install an inter-CPU IPI for CPU stop/restart */
140 	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
141 	    SDT_SYSIGT, SEL_KPL, 0);
142 
143 	/* Install an inter-CPU IPI for CPU suspend/resume */
144 	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
145 	    SDT_SYSIGT, SEL_KPL, 0);
146 
147 	/* Install an IPI for calling delayed SWI */
148 	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
149 	    SDT_SYSIGT, SEL_KPL, 0);
150 
151 	/* Set boot_cpu_id if needed. */
152 	if (boot_cpu_id == -1) {
153 		boot_cpu_id = PCPU_GET(apic_id);
154 		cpu_info[boot_cpu_id].cpu_bsp = 1;
155 	} else
156 		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
157 		    ("BSP's APIC ID doesn't match boot_cpu_id"));
158 
159 	/* Probe logical/physical core configuration. */
160 	topo_probe();
161 
162 	assign_cpu_ids();
163 
164 	mptramp_la57 = la57;
165 	mptramp_nx = pg_nx != 0;
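	/*
	 * The AP trampoline loads mptramp_pagetables into %cr3 while
	 * still executing in 32-bit mode, so the kernel page table root
	 * must reside below 4G; the MPASS below checks that assumption.
	 */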
166 	MPASS(kernel_pmap->pm_cr3 < (1UL << 32));
167 	mptramp_pagetables = kernel_pmap->pm_cr3;
168 
169 	/* Start each Application Processor */
170 	start_all_aps();
171 
172 	set_interrupt_apic_ids();
173 
174 #if defined(DEV_ACPI) && MAXMEMDOM > 1
175 	acpi_pxm_set_cpu_locality();
176 #endif
177 }
178 
179 /*
180  * AP CPUs call this to initialize themselves.
181  */
182 void
183 init_secondary(void)
184 {
185 	struct pcpu *pc;
186 	struct nmi_pcpu *np;
187 	struct user_segment_descriptor *gdt;
188 	struct region_descriptor ap_gdt;
189 	u_int64_t cr0;
190 	int cpu, gsel_tss, x;
191 
192 	/* Set by the startup code for us to use */
193 	cpu = bootAP;
194 
195 	/* Update microcode before doing anything else. */
196 	ucode_load_ap(cpu);
197 
198 	/* Initialize the PCPU area. */
199 	pc = bootpcpu;
200 	pcpu_init(pc, cpu, sizeof(struct pcpu));
201 	dpcpu_init(dpcpu, cpu);
202 	pc->pc_apic_id = cpu_apic_ids[cpu];
203 	pc->pc_prvspace = pc;
204 	pc->pc_curthread = 0;
205 	pc->pc_tssp = &pc->pc_common_tss;
206 	pc->pc_rsp0 = 0;
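	/* Top of the PTI trampoline stack, aligned down to 16 bytes. */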
207 	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
208 	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
209 	gdt = pc->pc_gdt;
210 	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
211 	pc->pc_fs32p = &gdt[GUFS32_SEL];
212 	pc->pc_gs32p = &gdt[GUGS32_SEL];
213 	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
214 	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
215 	/* See comment in pmap_bootstrap(). */
216 	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
217 	pc->pc_pcid_gen = 1;
218 	pc->pc_kpmap_store.pm_pcid = PMAP_PCID_KERN;
219 	pc->pc_kpmap_store.pm_gen = 1;
220 
221 	pc->pc_smp_tlb_gen = 1;
222 
223 	/* Init tss */
224 	pc->pc_common_tss = __pcpu[0].pc_common_tss;
225 	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
226 	    IOPERM_BITMAP_SIZE;
227 	pc->pc_common_tss.tss_rsp0 = 0;
228 
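	/*
	 * Each dedicated IST stack configured below keeps a struct
	 * nmi_pcpu at its top so that a handler running on that stack
	 * can still locate this CPU's pcpu area.
	 */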
229 	/* The doublefault stack runs on IST1. */
230 	np = ((struct nmi_pcpu *)&doublefault_stack[DBLFAULT_STACK_SIZE]) - 1;
231 	np->np_pcpu = (register_t)pc;
232 	pc->pc_common_tss.tss_ist1 = (long)np;
233 
234 	/* The NMI stack runs on IST2. */
235 	np = ((struct nmi_pcpu *)&nmi_stack[NMI_STACK_SIZE]) - 1;
236 	np->np_pcpu = (register_t)pc;
237 	pc->pc_common_tss.tss_ist2 = (long)np;
238 
239 	/* The MC# stack runs on IST3. */
240 	np = ((struct nmi_pcpu *)&mce_stack[MCE_STACK_SIZE]) - 1;
241 	np->np_pcpu = (register_t)pc;
242 	pc->pc_common_tss.tss_ist3 = (long)np;
243 
244 	/* The DB# stack runs on IST4. */
245 	np = ((struct nmi_pcpu *)&dbg_stack[DBG_STACK_SIZE]) - 1;
246 	np->np_pcpu = (register_t)pc;
247 	pc->pc_common_tss.tss_ist4 = (long)np;
248 
249 	/* Prepare private GDT */
250 	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
251 	for (x = 0; x < NGDT; x++) {
252 		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
253 		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
254 			ssdtosd(&gdt_segs[x], &gdt[x]);
255 	}
256 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
257 	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
258 	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
259 	ap_gdt.rd_base = (u_long)gdt;
260 	lgdt(&ap_gdt);			/* does magic intra-segment return */
261 
262 	wrmsr(MSR_FSBASE, 0);		/* User value */
263 	wrmsr(MSR_GSBASE, (uint64_t)pc);
264 	wrmsr(MSR_KGSBASE, 0);		/* User value */
265 	fix_cpuid();
266 
267 	lidt(&r_idt);
268 
269 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
270 	ltr(gsel_tss);
271 
272 	/*
273 	 * Set to a known state:
274 	 * Set by mpboot.s: CR0_PG, CR0_PE
275 	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
276 	 */
277 	cr0 = rcr0();
278 	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
279 	load_cr0(cr0);
280 
281 	amd64_conf_fast_syscall();
282 
283 	/* signal our startup to the BSP. */
284 	mp_naps++;
285 
286 	/* Spin until the BSP releases the APs. */
287 	while (atomic_load_acq_int(&aps_ready) == 0)
288 		ia32_pause();
289 
290 	init_secondary_tail();
291 }
292 
293 static void
294 amd64_mp_alloc_pcpu(void)
295 {
296 	vm_page_t m;
297 	int cpu;
298 
299 	/* Allocate pcpu areas to the correct domain. */
300 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
301 #ifdef NUMA
302 		m = NULL;
303 		if (vm_ndomains > 1) {
304 			m = vm_page_alloc_noobj_domain(
305 			    acpi_pxm_get_cpu_locality(cpu_apic_ids[cpu]), 0);
306 		}
307 		if (m == NULL)
308 #endif
309 			m = vm_page_alloc_noobj(0);
310 		if (m == NULL)
311 			panic("cannot alloc pcpu page for cpu %d", cpu);
312 		pmap_qenter((vm_offset_t)&__pcpu[cpu], &m, 1);
313 	}
314 }
315 
316 /*
317  * Start each AP in our list.
318  */
319 int
320 start_all_aps(void)
321 {
322 	vm_page_t m_boottramp, m_pml4, m_pdp, m_pd[4];
323 	pml5_entry_t old_pml45;
324 	pml4_entry_t *v_pml4;
325 	pdp_entry_t *v_pdp;
326 	pd_entry_t *v_pd;
327 	vm_paddr_t boot_address;
328 	u_int32_t mpbioswarmvec;
329 	int apic_id, cpu, domain, i;
330 	u_char mpbiosreason;
331 
332 	amd64_mp_alloc_pcpu();
333 	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
334 
335 	MPASS(bootMP_size <= PAGE_SIZE);
336 	m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
337 	    (1ULL << 20), /* Trampoline should be below 1M for real mode */
338 	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
339 	boot_address = VM_PAGE_TO_PHYS(m_boottramp);
340 
341 	/* Create a transient 1:1 mapping of low 4G */
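	/*
	 * The identity map consists of a single PDP page whose first
	 * four entries point at four PD pages, each mapping 1 GB with
	 * 2 MB (PG_PS) superpages.  It is hung off slot 0 of the
	 * top-level page table (via a temporary PML4 page when LA57 is
	 * active) so the trampoline can enable paging while running at
	 * its low physical address.
	 */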
342 	if (la57) {
343 		m_pml4 = pmap_page_alloc_below_4g(true);
344 		v_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pml4));
345 	} else {
346 		v_pml4 = &kernel_pmap->pm_pmltop[0];
347 	}
348 	m_pdp = pmap_page_alloc_below_4g(true);
349 	v_pdp = (pdp_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pdp));
350 	m_pd[0] = pmap_page_alloc_below_4g(false);
351 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[0]));
352 	for (i = 0; i < NPDEPG; i++)
353 		v_pd[i] = (i << PDRSHIFT) | X86_PG_V | X86_PG_RW | X86_PG_A |
354 		    X86_PG_M | PG_PS;
355 	m_pd[1] = pmap_page_alloc_below_4g(false);
356 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[1]));
357 	for (i = 0; i < NPDEPG; i++)
358 		v_pd[i] = (NBPDP + (i << PDRSHIFT)) | X86_PG_V | X86_PG_RW |
359 		    X86_PG_A | X86_PG_M | PG_PS;
360 	m_pd[2] = pmap_page_alloc_below_4g(false);
361 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[2]));
362 	for (i = 0; i < NPDEPG; i++)
363 		v_pd[i] = (2UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
364 		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
365 	m_pd[3] = pmap_page_alloc_below_4g(false);
366 	v_pd = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m_pd[3]));
367 	for (i = 0; i < NPDEPG; i++)
368 		v_pd[i] = (3UL * NBPDP + (i << PDRSHIFT)) | X86_PG_V |
369 		    X86_PG_RW | X86_PG_A | X86_PG_M | PG_PS;
370 	v_pdp[0] = VM_PAGE_TO_PHYS(m_pd[0]) | X86_PG_V |
371 	    X86_PG_RW | X86_PG_A | X86_PG_M;
372 	v_pdp[1] = VM_PAGE_TO_PHYS(m_pd[1]) | X86_PG_V |
373 	    X86_PG_RW | X86_PG_A | X86_PG_M;
374 	v_pdp[2] = VM_PAGE_TO_PHYS(m_pd[2]) | X86_PG_V |
375 	    X86_PG_RW | X86_PG_A | X86_PG_M;
376 	v_pdp[3] = VM_PAGE_TO_PHYS(m_pd[3]) | X86_PG_V |
377 	    X86_PG_RW | X86_PG_A | X86_PG_M;
378 	old_pml45 = kernel_pmap->pm_pmltop[0];
379 	if (la57) {
380 		kernel_pmap->pm_pmltop[0] = VM_PAGE_TO_PHYS(m_pml4) |
381 		    X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M;
382 	}
383 	v_pml4[0] = VM_PAGE_TO_PHYS(m_pdp) | X86_PG_V |
384 	    X86_PG_RW | X86_PG_A | X86_PG_M;
385 	pmap_invalidate_all(kernel_pmap);
386 
387 	/* copy the AP 1st level boot code */
388 	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
389 	if (bootverbose)
390 		printf("AP boot address %#lx\n", boot_address);
391 
392 	/* save the current value of the warm-start vector */
393 	if (!efi_boot)
394 		mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
395 	outb(CMOS_REG, BIOS_RESET);
396 	mpbiosreason = inb(CMOS_DATA);
397 
398 	/* setup a vector to our boot code */
399 	if (!efi_boot) {
400 		*((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET;
401 		*((volatile u_short *)WARMBOOT_SEG) = (boot_address >> 4);
402 	}
403 	outb(CMOS_REG, BIOS_RESET);
404 	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
405 
406 	/* start each AP */
407 	domain = 0;
408 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
409 		apic_id = cpu_apic_ids[cpu];
410 #ifdef NUMA
411 		if (vm_ndomains > 1)
412 			domain = acpi_pxm_get_cpu_locality(apic_id);
413 #endif
414 		/* allocate and set up an idle stack data page */
415 		bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
416 		    M_WAITOK | M_ZERO);
417 		doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
418 		    M_WAITOK | M_ZERO);
419 		mce_stack = kmem_malloc(MCE_STACK_SIZE,
420 		    M_WAITOK | M_ZERO);
421 		nmi_stack = kmem_malloc_domainset(
422 		    DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
423 		dbg_stack = kmem_malloc_domainset(
424 		    DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
425 		dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
426 		    DPCPU_SIZE, M_WAITOK | M_ZERO);
427 
428 		bootpcpu = &__pcpu[cpu];
429 		bootSTK = (char *)bootstacks[cpu] +
430 		    kstack_pages * PAGE_SIZE - 8;
431 		bootAP = cpu;
432 
433 		/* attempt to start the Application Processor */
434 		if (!start_ap(apic_id, boot_address)) {
435 			/* restore the warmstart vector */
436 			if (!efi_boot)
437 				*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
438 			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
439 		}
440 
441 		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
442 	}
443 
444 	/* restore the warmstart vector */
445 	if (!efi_boot)
446 		*(u_int32_t *)WARMBOOT_OFF = mpbioswarmvec;
447 
448 	outb(CMOS_REG, BIOS_RESET);
449 	outb(CMOS_DATA, mpbiosreason);
450 
451 	/* Destroy transient 1:1 mapping */
452 	kernel_pmap->pm_pmltop[0] = old_pml45;
453 	invlpg(0);
454 	if (la57)
455 		vm_page_free(m_pml4);
456 	vm_page_free(m_pd[3]);
457 	vm_page_free(m_pd[2]);
458 	vm_page_free(m_pd[1]);
459 	vm_page_free(m_pd[0]);
460 	vm_page_free(m_pdp);
461 	vm_page_free(m_boottramp);
462 
463 	/* number of APs actually started */
464 	return (mp_naps);
465 }
466 
467 /*
468  * This function starts the AP (application processor) identified
469  * by 'apic_id'.  It goes through quite a "song and dance" to
470  * accomplish this; the sequence is necessary because of the
471  * nuances of the different hardware we might encounter.  It isn't
472  * pretty, but it seems to work.
473  */
474 static int
475 start_ap(int apic_id, vm_paddr_t boot_address)
476 {
477 	int vector, ms;
478 	int cpus;
479 
480 	/* The startup IPI vector is the trampoline's physical page number. */
481 	vector = (boot_address >> 12) & 0xff;
482 
483 	/* used as a watchpoint to signal AP startup */
484 	/* remember the current AP count so we can detect this AP's startup */
485 
486 	ipi_startup(apic_id, vector);
487 
488 	/* Wait up to 5 seconds for it to start. */
489 	for (ms = 0; ms < 5000; ms++) {
490 		if (mp_naps > cpus)
491 			return 1;	/* return SUCCESS */
492 		DELAY(1000);
493 	}
494 	return 0;		/* return FAILURE */
495 }
496 
497 /*
498  * Flush the TLB on other CPUs
499  */
500 
501 /*
502  * Invalidation request.  The PCPU field pc_smp_tlb_op uses u_int
503  * instead of this enum to avoid namespace and ABI issues with enums.
504  */
505 enum invl_op_codes {
506       INVL_OP_TLB		= 1,
507       INVL_OP_TLB_INVPCID	= 2,
508       INVL_OP_TLB_INVPCID_PTI	= 3,
509       INVL_OP_TLB_PCID		= 4,
510       INVL_OP_PGRNG		= 5,
511       INVL_OP_PGRNG_INVPCID	= 6,
512       INVL_OP_PGRNG_PCID	= 7,
513       INVL_OP_PG		= 8,
514       INVL_OP_PG_INVPCID	= 9,
515       INVL_OP_PG_PCID		= 10,
516       INVL_OP_CACHE		= 11,
517 };
518 
519 /*
520  * These variables are initialized at startup to reflect how each of
521  * the different kinds of invalidations should be performed on the
522  * current machine and environment.
523  */
524 static enum invl_op_codes invl_op_tlb;
525 static enum invl_op_codes invl_op_pgrng;
526 static enum invl_op_codes invl_op_pg;
527 
528 /*
529  * Scoreboard of IPI completion notifications from target to IPI initiator.
530  *
531  * Each CPU can initiate a shootdown IPI independently of the others.
532  * The initiator enters a critical section, fills its local PCPU
533  * shootdown info (the pc_smp_tlb_ variables), and clears the
534  * scoreboard generation at (cpu, my_cpuid) for each target CPU.
535  * An IPI is then sent to all targets, which scan for zeroed
536  * scoreboard generation words; upon finding one, a target reads the
537  * shootdown data from the initiator's pcpu and sets the generation.
538  * Meanwhile the initiator spins until all zeroed generations update.
539  */
540 static uint32_t *invl_scoreboard;
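/*
 * Scoreboard layout sketch: with N = mp_maxid + 1, the word tracking a
 * request from initiator i to target t is invl_scoreboard[t * N + i],
 * which is what invl_scoreboard_getcpu() and invl_scoreboard_slot()
 * below compute.
 */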
541 
542 static void
543 invl_scoreboard_init(void *arg __unused)
544 {
545 	u_int i;
546 
547 	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
548 	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
549 	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
550 		invl_scoreboard[i] = 1;
551 
552 	if (pmap_pcid_enabled) {
553 		if (invpcid_works) {
554 			if (pti)
555 				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
556 			else
557 				invl_op_tlb = INVL_OP_TLB_INVPCID;
558 			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
559 			invl_op_pg = INVL_OP_PG_INVPCID;
560 		} else {
561 			invl_op_tlb = INVL_OP_TLB_PCID;
562 			invl_op_pgrng = INVL_OP_PGRNG_PCID;
563 			invl_op_pg = INVL_OP_PG_PCID;
564 		}
565 	} else {
566 		invl_op_tlb = INVL_OP_TLB;
567 		invl_op_pgrng = INVL_OP_PGRNG;
568 		invl_op_pg = INVL_OP_PG;
569 	}
570 }
571 SYSINIT(invl_ops, SI_SUB_SMP - 1, SI_ORDER_ANY, invl_scoreboard_init, NULL);
572 
573 static uint32_t *
574 invl_scoreboard_getcpu(u_int cpu)
575 {
576 	return (invl_scoreboard + cpu * (mp_maxid + 1));
577 }
578 
579 static uint32_t *
580 invl_scoreboard_slot(u_int cpu)
581 {
582 	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
583 }
584 
585 /*
586  * Used by the pmap to request cache or TLB invalidation on local and
587  * remote processors.  The set of remote CPUs to signal with the
588  * invalidation IPI is derived from the pmap.  As an optimization, the
589  * curcpu_cb callback is invoked on the calling CPU in a critical
590  * section while waiting for the remote CPUs to complete the operation.
591  *
592  * The callback function is called unconditionally on the caller's
593  * underlying processor, even when this processor is not set in the
594  * mask.  So, the callback function must be prepared to handle such
595  * spurious invocations.
596  *
597  * Interrupts must be enabled when calling this function once SMP has
598  * started, to avoid deadlocking with other IPIs that are protected by
599  * the smp_ipi_mtx spinlock on the initiator side.
600  *
601  * The function must be called with the thread pinned; it unpins the
602  * thread upon completion.
603  */
604 static void
605 smp_targeted_tlb_shootdown(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
606     smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
607 {
608 	cpuset_t mask;
609 	uint32_t generation, *p_cpudone;
610 	int cpu;
611 	bool is_all;
612 
613 	/*
614 	 * It is not necessary to signal other CPUs while booting or
615 	 * when in the debugger.
616 	 */
617 	if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
618 		goto local_cb;
619 
620 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
621 
622 	/*
623 	 * Make a stable copy of the set of CPUs on which the pmap is active.
624 	 * See if we have to interrupt other CPUs.
625 	 */
626 	CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
627 	is_all = CPU_CMP(&mask, &all_cpus) == 0;
628 	CPU_CLR(curcpu, &mask);
629 	if (CPU_EMPTY(&mask))
630 		goto local_cb;
631 
632 	/*
633 	 * The initiator must have interrupts enabled, which prevents
634 	 * non-invalidation IPIs that take the smp_ipi_mtx spinlock
635 	 * from deadlocking with us.  On the other hand, preemption
636 	 * must be disabled to pin the initiator to its instance of the
637 	 * pcpu pc_smp_tlb data and scoreboard line.
638 	 */
639 	KASSERT((read_rflags() & PSL_I) != 0,
640 	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
641 	critical_enter();
642 
643 	PCPU_SET(smp_tlb_addr1, addr1);
644 	PCPU_SET(smp_tlb_addr2, addr2);
645 	PCPU_SET(smp_tlb_pmap, pmap);
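	/* Generation 0 is reserved: a zeroed slot marks a pending request. */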
646 	generation = PCPU_GET(smp_tlb_gen);
647 	if (++generation == 0)
648 		generation = 1;
649 	PCPU_SET(smp_tlb_gen, generation);
650 	PCPU_SET(smp_tlb_op, op);
651 	/* Fence between filling smp_tlb fields and clearing scoreboard. */
652 	atomic_thread_fence_rel();
653 
654 	CPU_FOREACH_ISSET(cpu, &mask) {
655 		KASSERT(*invl_scoreboard_slot(cpu) != 0,
656 		    ("IPI scoreboard is zero, initiator %d target %d",
657 		    curcpu, cpu));
658 		*invl_scoreboard_slot(cpu) = 0;
659 	}
660 
661 	/*
662 	 * The IPI acts as a fence between writing to the scoreboard above
663 	 * (zeroing the slot) and reading from it below (waiting for
664 	 * acknowledgment).
665 	 */
666 	if (is_all) {
667 		ipi_all_but_self(IPI_INVLOP);
668 	} else {
669 		ipi_selected(mask, IPI_INVLOP);
670 	}
671 	curcpu_cb(pmap, addr1, addr2);
672 	CPU_FOREACH_ISSET(cpu, &mask) {
673 		p_cpudone = invl_scoreboard_slot(cpu);
674 		while (atomic_load_int(p_cpudone) != generation)
675 			ia32_pause();
676 	}
677 
678 	/*
679 	 * Unpin before leaving the critical section.  If the thread owes
680 	 * preemption, this allows the scheduler to run the thread on any
681 	 * CPU in its cpuset.
682 	 */
683 	sched_unpin();
684 	critical_exit();
685 
686 	return;
687 
688 local_cb:
689 	critical_enter();
690 	curcpu_cb(pmap, addr1, addr2);
691 	sched_unpin();
692 	critical_exit();
693 }
694 
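/*
 * A minimal usage sketch for the wrappers below (the callback name is
 * illustrative, not an actual pmap symbol): a pmap-layer caller pins
 * itself, issues the shootdown, and relies on the shootdown code to
 * unpin the thread:
 *
 *	sched_pin();
 *	smp_masked_invltlb(pmap, my_invalidate_all_cb);
 *
 * where my_invalidate_all_cb(pmap, addr1, addr2) performs the local
 * invalidation on the calling CPU.
 */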
695 void
696 smp_masked_invltlb(pmap_t pmap, smp_invl_cb_t curcpu_cb)
697 {
698 	smp_targeted_tlb_shootdown(pmap, 0, 0, curcpu_cb, invl_op_tlb);
699 #ifdef COUNT_XINVLTLB_HITS
700 	ipi_global++;
701 #endif
702 }
703 
704 void
705 smp_masked_invlpg(vm_offset_t addr, pmap_t pmap, smp_invl_cb_t curcpu_cb)
706 {
707 	smp_targeted_tlb_shootdown(pmap, addr, 0, curcpu_cb, invl_op_pg);
708 #ifdef COUNT_XINVLTLB_HITS
709 	ipi_page++;
710 #endif
711 }
712 
713 void
714 smp_masked_invlpg_range(vm_offset_t addr1, vm_offset_t addr2, pmap_t pmap,
715     smp_invl_cb_t curcpu_cb)
716 {
717 	smp_targeted_tlb_shootdown(pmap, addr1, addr2, curcpu_cb,
718 	    invl_op_pgrng);
719 #ifdef COUNT_XINVLTLB_HITS
720 	ipi_range++;
721 	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
722 #endif
723 }
724 
725 void
726 smp_cache_flush(smp_invl_cb_t curcpu_cb)
727 {
728 	smp_targeted_tlb_shootdown(kernel_pmap, 0, 0, curcpu_cb, INVL_OP_CACHE);
729 }
730 
731 /*
732  * Handlers for TLB related IPIs
733  */
734 static void
735 invltlb_handler(pmap_t smp_tlb_pmap)
736 {
737 #ifdef COUNT_XINVLTLB_HITS
738 	xhits_gbl[PCPU_GET(cpuid)]++;
739 #endif /* COUNT_XINVLTLB_HITS */
740 #ifdef COUNT_IPIS
741 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
742 #endif /* COUNT_IPIS */
743 
744 	if (smp_tlb_pmap == kernel_pmap)
745 		invltlb_glob();
746 	else
747 		invltlb();
748 }
749 
750 static void
751 invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
752 {
753 	struct invpcid_descr d;
754 
755 #ifdef COUNT_XINVLTLB_HITS
756 	xhits_gbl[PCPU_GET(cpuid)]++;
757 #endif /* COUNT_XINVLTLB_HITS */
758 #ifdef COUNT_IPIS
759 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
760 #endif /* COUNT_IPIS */
761 
762 	d.pcid = pmap_get_pcid(smp_tlb_pmap);
763 	d.pad = 0;
764 	d.addr = 0;
765 	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
766 	    INVPCID_CTX);
767 }
768 
769 static void
770 invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
771 {
772 	struct invpcid_descr d;
773 
774 #ifdef COUNT_XINVLTLB_HITS
775 	xhits_gbl[PCPU_GET(cpuid)]++;
776 #endif /* COUNT_XINVLTLB_HITS */
777 #ifdef COUNT_IPIS
778 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
779 #endif /* COUNT_IPIS */
780 
781 	d.pcid = pmap_get_pcid(smp_tlb_pmap);
782 	d.pad = 0;
783 	d.addr = 0;
784 	if (smp_tlb_pmap == kernel_pmap) {
785 		/*
786 		 * This invalidation actually needs to clear the kernel
787 		 * mappings from the TLB of the current pmap, but since
788 		 * the flush was requested for the kernel pmap, achieve
789 		 * it by performing a global flush.
790 		 */
791 		invpcid(&d, INVPCID_CTXGLOB);
792 	} else {
793 		invpcid(&d, INVPCID_CTX);
794 		if (smp_tlb_pmap == PCPU_GET(curpmap) &&
795 		    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
796 			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
797 	}
798 }
799 
800 static void
801 invltlb_pcid_handler(pmap_t smp_tlb_pmap)
802 {
803 #ifdef COUNT_XINVLTLB_HITS
804 	xhits_gbl[PCPU_GET(cpuid)]++;
805 #endif /* COUNT_XINVLTLB_HITS */
806 #ifdef COUNT_IPIS
807 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
808 #endif /* COUNT_IPIS */
809 
810 	if (smp_tlb_pmap == kernel_pmap) {
811 		invltlb_glob();
812 	} else {
813 		/*
814 		 * The current pmap might not be equal to
815 		 * smp_tlb_pmap.  The clearing of the pm_gen in
816 		 * pmap_invalidate_all() takes care of TLB
817 		 * invalidation when switching to the pmap on this
818 		 * CPU.
819 		 */
820 		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
821 			load_cr3(smp_tlb_pmap->pm_cr3 |
822 			    pmap_get_pcid(smp_tlb_pmap));
823 			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
824 				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
825 		}
826 	}
827 }
828 
829 static void
830 invlpg_handler(vm_offset_t smp_tlb_addr1)
831 {
832 #ifdef COUNT_XINVLTLB_HITS
833 	xhits_pg[PCPU_GET(cpuid)]++;
834 #endif /* COUNT_XINVLTLB_HITS */
835 #ifdef COUNT_IPIS
836 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
837 #endif /* COUNT_IPIS */
838 
839 	invlpg(smp_tlb_addr1);
840 }
841 
842 static void
843 invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
844 {
845 	struct invpcid_descr d;
846 
847 #ifdef COUNT_XINVLTLB_HITS
848 	xhits_pg[PCPU_GET(cpuid)]++;
849 #endif /* COUNT_XINVLTLB_HITS */
850 #ifdef COUNT_IPIS
851 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
852 #endif /* COUNT_IPIS */
853 
854 	pmap_invlpg(smp_tlb_pmap, smp_tlb_addr1);
855 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
856 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
857 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
858 		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
859 		d.pad = 0;
860 		d.addr = smp_tlb_addr1;
861 		invpcid(&d, INVPCID_ADDR);
862 	}
863 }
864 
865 static void
866 invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
867 {
868 	uint64_t kcr3, ucr3;
869 	uint32_t pcid;
870 
871 #ifdef COUNT_XINVLTLB_HITS
872 	xhits_pg[PCPU_GET(cpuid)]++;
873 #endif /* COUNT_XINVLTLB_HITS */
874 #ifdef COUNT_IPIS
875 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
876 #endif /* COUNT_IPIS */
877 
878 	invlpg(smp_tlb_addr1);
879 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
880 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
881 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
882 		pcid = pmap_get_pcid(smp_tlb_pmap);
883 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
884 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
885 		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
886 	}
887 }
888 
889 static void
890 invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
891 {
892 	vm_offset_t addr;
893 
894 #ifdef COUNT_XINVLTLB_HITS
895 	xhits_rng[PCPU_GET(cpuid)]++;
896 #endif /* COUNT_XINVLTLB_HITS */
897 #ifdef COUNT_IPIS
898 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
899 #endif /* COUNT_IPIS */
900 
901 	addr = smp_tlb_addr1;
902 	do {
903 		invlpg(addr);
904 		addr += PAGE_SIZE;
905 	} while (addr < smp_tlb_addr2);
906 }
907 
908 static void
909 invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
910     vm_offset_t smp_tlb_addr2)
911 {
912 	struct invpcid_descr d;
913 	vm_offset_t addr;
914 
915 #ifdef COUNT_XINVLTLB_HITS
916 	xhits_rng[PCPU_GET(cpuid)]++;
917 #endif /* COUNT_XINVLTLB_HITS */
918 #ifdef COUNT_IPIS
919 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
920 #endif /* COUNT_IPIS */
921 
922 	addr = smp_tlb_addr1;
923 	if (smp_tlb_pmap == kernel_pmap && PCPU_GET(pcid_invlpg_workaround)) {
924 		struct invpcid_descr d = { 0 };
925 
926 		invpcid(&d, INVPCID_CTXGLOB);
927 	} else {
928 		do {
929 			invlpg(addr);
930 			addr += PAGE_SIZE;
931 		} while (addr < smp_tlb_addr2);
932 	}
933 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
934 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
935 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
936 		d.pcid = pmap_get_pcid(smp_tlb_pmap) | PMAP_PCID_USER_PT;
937 		d.pad = 0;
938 		d.addr = smp_tlb_addr1;
939 		do {
940 			invpcid(&d, INVPCID_ADDR);
941 			d.addr += PAGE_SIZE;
942 		} while (d.addr < smp_tlb_addr2);
943 	}
944 }
945 
946 static void
947 invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
948     vm_offset_t smp_tlb_addr2)
949 {
950 	vm_offset_t addr;
951 	uint64_t kcr3, ucr3;
952 	uint32_t pcid;
953 
954 #ifdef COUNT_XINVLTLB_HITS
955 	xhits_rng[PCPU_GET(cpuid)]++;
956 #endif /* COUNT_XINVLTLB_HITS */
957 #ifdef COUNT_IPIS
958 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
959 #endif /* COUNT_IPIS */
960 
961 	addr = smp_tlb_addr1;
962 	do {
963 		invlpg(addr);
964 		addr += PAGE_SIZE;
965 	} while (addr < smp_tlb_addr2);
966 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
967 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
968 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
969 		pcid = pmap_get_pcid(smp_tlb_pmap);
970 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
971 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
972 		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, smp_tlb_addr2);
973 	}
974 }
975 
976 static void
977 invlcache_handler(void)
978 {
979 #ifdef COUNT_IPIS
980 	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
981 #endif /* COUNT_IPIS */
982 	wbinvd();
983 }
984 
985 static void
986 invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
987     vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
988 {
989 	switch (smp_tlb_op) {
990 	case INVL_OP_TLB:
991 		invltlb_handler(smp_tlb_pmap);
992 		break;
993 	case INVL_OP_TLB_INVPCID:
994 		invltlb_invpcid_handler(smp_tlb_pmap);
995 		break;
996 	case INVL_OP_TLB_INVPCID_PTI:
997 		invltlb_invpcid_pti_handler(smp_tlb_pmap);
998 		break;
999 	case INVL_OP_TLB_PCID:
1000 		invltlb_pcid_handler(smp_tlb_pmap);
1001 		break;
1002 	case INVL_OP_PGRNG:
1003 		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
1004 		break;
1005 	case INVL_OP_PGRNG_INVPCID:
1006 		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1007 		    smp_tlb_addr2);
1008 		break;
1009 	case INVL_OP_PGRNG_PCID:
1010 		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1011 		    smp_tlb_addr2);
1012 		break;
1013 	case INVL_OP_PG:
1014 		invlpg_handler(smp_tlb_addr1);
1015 		break;
1016 	case INVL_OP_PG_INVPCID:
1017 		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1018 		break;
1019 	case INVL_OP_PG_PCID:
1020 		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1021 		break;
1022 	case INVL_OP_CACHE:
1023 		invlcache_handler();
1024 		break;
1025 	default:
1026 		__assert_unreachable();
1027 		break;
1028 	}
1029 }
1030 
1031 void
1032 invlop_handler(void)
1033 {
1034 	struct pcpu *initiator_pc;
1035 	pmap_t smp_tlb_pmap;
1036 	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
1037 	u_int initiator_cpu_id;
1038 	enum invl_op_codes smp_tlb_op;
1039 	uint32_t *scoreboard, smp_tlb_gen;
1040 
1041 	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
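	/*
	 * A single IPI may have to service requests from several
	 * initiators: keep rescanning this CPU's scoreboard row until no
	 * zeroed (unacknowledged) slots remain.
	 */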
1042 	for (;;) {
1043 		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
1044 		    initiator_cpu_id++) {
1045 			if (atomic_load_int(&scoreboard[initiator_cpu_id]) == 0)
1046 				break;
1047 		}
1048 		if (initiator_cpu_id > mp_maxid)
1049 			break;
1050 		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];
1051 
1052 		/*
1053 		 * This acquire fence and its corresponding release
1054 		 * fence in smp_targeted_tlb_shootdown() sit between
1055 		 * reading the zeroed scoreboard slot and accessing the
1056 		 * initiator's PCPU for the pc_smp_tlb values.
1057 		 */
1058 		atomic_thread_fence_acq();
1059 		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
1060 		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
1061 		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
1062 		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
1063 		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;
1064 
1065 		/*
1066 		 * Ensure that we do not make our scoreboard
1067 		 * notification visible to the initiator until the
1068 		 * pc_smp_tlb values are read.  The corresponding
1069 		 * fence is implicitly provided by the barrier in the
1070 		 * IPI send operation before the APIC ICR register
1071 		 * write.
1072 		 *
1073 		 * As an optimization, the request is acknowledged before
1074 		 * the actual invalidation is performed.  This is safe
1075 		 * because the target CPU cannot return to userspace before
1076 		 * the handler finishes.  Only an NMI can preempt the
1077 		 * handler, but an NMI would see the kernel handler frame
1078 		 * and would not touch the not-yet-invalidated user page table.
1079 		 */
1080 		atomic_thread_fence_acq();
1081 		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);
1082 
1083 		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
1084 		    smp_tlb_addr2);
1085 	}
1086 }
1087