xref: /freebsd/sys/amd64/amd64/mp_machdep.c (revision 7cc42f6d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 1996, by Steve Passe
5  * Copyright (c) 2003, by Peter Wemm
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. The name of the developer may NOT be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include "opt_acpi.h"
33 #include "opt_cpu.h"
34 #include "opt_ddb.h"
35 #include "opt_kstack_pages.h"
36 #include "opt_sched.h"
37 #include "opt_smp.h"
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/cpuset.h>
43 #include <sys/domainset.h>
44 #ifdef GPROF
45 #include <sys/gmon.h>
46 #endif
47 #include <sys/kdb.h>
48 #include <sys/kernel.h>
49 #include <sys/ktr.h>
50 #include <sys/lock.h>
51 #include <sys/malloc.h>
52 #include <sys/memrange.h>
53 #include <sys/mutex.h>
54 #include <sys/pcpu.h>
55 #include <sys/proc.h>
56 #include <sys/sched.h>
57 #include <sys/smp.h>
58 #include <sys/sysctl.h>
59 
60 #include <vm/vm.h>
61 #include <vm/vm_param.h>
62 #include <vm/pmap.h>
63 #include <vm/vm_kern.h>
64 #include <vm/vm_extern.h>
65 #include <vm/vm_page.h>
66 #include <vm/vm_phys.h>
67 
68 #include <x86/apicreg.h>
69 #include <machine/clock.h>
70 #include <machine/cputypes.h>
71 #include <machine/cpufunc.h>
72 #include <x86/mca.h>
73 #include <machine/md_var.h>
74 #include <machine/pcb.h>
75 #include <machine/psl.h>
76 #include <machine/smp.h>
77 #include <machine/specialreg.h>
78 #include <machine/tss.h>
79 #include <x86/ucode.h>
80 #include <machine/cpu.h>
81 #include <x86/init.h>
82 
83 #ifdef DEV_ACPI
84 #include <contrib/dev/acpica/include/acpi.h>
85 #include <dev/acpica/acpivar.h>
86 #endif
87 
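/*
 * The BIOS warm-boot vector (offset word at physical 0x467, segment word at
 * 0x469) and the CMOS shutdown status byte are used below to point a
 * resetting AP at our trampoline; writing BIOS_WARM requests a warm start
 * through that vector.
 */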
88 #define WARMBOOT_TARGET		0
89 #define WARMBOOT_OFF		(KERNBASE + 0x0467)
90 #define WARMBOOT_SEG		(KERNBASE + 0x0469)
91 
92 #define CMOS_REG		(0x70)
93 #define CMOS_DATA		(0x71)
94 #define BIOS_RESET		(0x0f)
95 #define BIOS_WARM		(0x0a)
96 
97 #define GiB(v)			(v ## ULL << 30)
98 
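/*
 * Four pages of initial AP page tables: an optional PML5 page (when LA57 is
 * enabled) plus the PML4, PDP and PD pages built in native_start_all_aps().
 */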
99 #define	AP_BOOTPT_SZ		(PAGE_SIZE * 4)
100 
101 /* Temporary variables for init_secondary()  */
102 char *doublefault_stack;
103 char *mce_stack;
104 char *nmi_stack;
105 char *dbg_stack;
106 
107 extern u_int mptramp_la57;
108 
109 /*
110  * Local data and functions.
111  */
112 
113 static int	start_ap(int apic_id);
114 
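/* True if pa falls within the loaded kernel image (btext .. _end). */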
115 static bool
116 is_kernel_paddr(vm_paddr_t pa)
117 {
118 
119 	return (pa >= trunc_2mpage(btext - KERNBASE) &&
120 	   pa < round_page(_end - KERNBASE));
121 }
122 
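/*
 * The AP boot page tables must live below 4 GiB, so that the trampoline's
 * 32-bit startup code can reference them, and must fall within memory
 * covered by the direct map (checked here against Maxmem).
 */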
123 static bool
124 is_mpboot_good(vm_paddr_t start, vm_paddr_t end)
125 {
126 
127 	return (start + AP_BOOTPT_SZ <= GiB(4) && atop(end) < Maxmem);
128 }
129 
130 /*
 131  * Allocate base memory for the AP trampoline code and the initial AP page tables.
132  */
133 void
134 mp_bootaddress(vm_paddr_t *physmap, unsigned int *physmap_idx)
135 {
136 	vm_paddr_t start, end;
137 	unsigned int i;
138 	bool allocated;
139 
140 	alloc_ap_trampoline(physmap, physmap_idx);
141 
142 	/*
143 	 * Find a memory region big enough below the 4GB boundary to
144 	 * store the initial page tables.  Region must be mapped by
145 	 * the direct map.
146 	 *
147 	 * Note that it needs to be aligned to a page boundary.
148 	 */
149 	allocated = false;
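	/*
	 * Walk the physmap entries from the last (highest) region downward.
	 * The index is unsigned, so the loop terminates once i wraps around
	 * below zero and becomes larger than *physmap_idx.
	 */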
150 	for (i = *physmap_idx; i <= *physmap_idx; i -= 2) {
151 		/*
152 		 * First, try to chomp at the start of the physmap region.
 153 		 * The kernel binary might already claim it.
154 		 */
155 		start = round_page(physmap[i]);
156 		end = start + AP_BOOTPT_SZ;
157 		if (start < end && end <= physmap[i + 1] &&
158 		    is_mpboot_good(start, end) &&
159 		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
160 			allocated = true;
161 			physmap[i] = end;
162 			break;
163 		}
164 
165 		/*
166 		 * Second, try to chomp at the end.  Again, check
167 		 * against kernel.
168 		 */
169 		end = trunc_page(physmap[i + 1]);
170 		start = end - AP_BOOTPT_SZ;
171 		if (start < end && start >= physmap[i] &&
172 		    is_mpboot_good(start, end) &&
173 		    !is_kernel_paddr(start) && !is_kernel_paddr(end - 1)) {
174 			allocated = true;
175 			physmap[i + 1] = start;
176 			break;
177 		}
178 	}
179 	if (allocated) {
180 		mptramp_pagetables = start;
181 		if (physmap[i] == physmap[i + 1] && *physmap_idx != 0) {
182 			memmove(&physmap[i], &physmap[i + 2],
183 			    sizeof(*physmap) * (*physmap_idx - i + 2));
184 			*physmap_idx -= 2;
185 		}
186 	} else {
187 		mptramp_pagetables = trunc_page(boot_address) - AP_BOOTPT_SZ;
188 		if (bootverbose)
189 			printf(
 190 "Cannot find enough space for the initial AP page tables, placing them at %#lx\n",
191 			    mptramp_pagetables);
192 	}
193 }
194 
195 /*
 196  * Initialize the IPI handlers and start up the APs.
197  */
198 void
199 cpu_mp_start(void)
200 {
201 	int i;
202 
203 	/* Initialize the logical ID to APIC ID table. */
204 	for (i = 0; i < MAXCPU; i++) {
205 		cpu_apic_ids[i] = -1;
206 	}
207 
208 	/* Install an inter-CPU IPI for cache and TLB invalidations. */
209 	setidt(IPI_INVLOP, pti ? IDTVEC(invlop_pti) : IDTVEC(invlop),
210 	    SDT_SYSIGT, SEL_KPL, 0);
211 
212 	/* Install an inter-CPU IPI for all-CPU rendezvous */
213 	setidt(IPI_RENDEZVOUS, pti ? IDTVEC(rendezvous_pti) :
214 	    IDTVEC(rendezvous), SDT_SYSIGT, SEL_KPL, 0);
215 
 216 	/* Install a generic inter-CPU IPI handler */
217 	setidt(IPI_BITMAP_VECTOR, pti ? IDTVEC(ipi_intr_bitmap_handler_pti) :
218 	    IDTVEC(ipi_intr_bitmap_handler), SDT_SYSIGT, SEL_KPL, 0);
219 
220 	/* Install an inter-CPU IPI for CPU stop/restart */
221 	setidt(IPI_STOP, pti ? IDTVEC(cpustop_pti) : IDTVEC(cpustop),
222 	    SDT_SYSIGT, SEL_KPL, 0);
223 
224 	/* Install an inter-CPU IPI for CPU suspend/resume */
225 	setidt(IPI_SUSPEND, pti ? IDTVEC(cpususpend_pti) : IDTVEC(cpususpend),
226 	    SDT_SYSIGT, SEL_KPL, 0);
227 
228 	/* Install an IPI for calling delayed SWI */
229 	setidt(IPI_SWI, pti ? IDTVEC(ipi_swi_pti) : IDTVEC(ipi_swi),
230 	    SDT_SYSIGT, SEL_KPL, 0);
231 
232 	/* Set boot_cpu_id if needed. */
233 	if (boot_cpu_id == -1) {
234 		boot_cpu_id = PCPU_GET(apic_id);
235 		cpu_info[boot_cpu_id].cpu_bsp = 1;
236 	} else
237 		KASSERT(boot_cpu_id == PCPU_GET(apic_id),
238 		    ("BSP's APIC ID doesn't match boot_cpu_id"));
239 
240 	/* Probe logical/physical core configuration. */
241 	topo_probe();
242 
243 	assign_cpu_ids();
244 
245 	mptramp_la57 = la57;
246 
247 	/* Start each Application Processor */
248 	init_ops.start_all_aps();
249 
250 	set_interrupt_apic_ids();
251 
252 #if defined(DEV_ACPI) && MAXMEMDOM > 1
253 	acpi_pxm_set_cpu_locality();
254 #endif
255 }
256 
257 /*
 258  * AP CPUs call this to initialize themselves.
259  */
260 void
261 init_secondary(void)
262 {
263 	struct pcpu *pc;
264 	struct nmi_pcpu *np;
265 	struct user_segment_descriptor *gdt;
266 	struct region_descriptor ap_gdt;
267 	u_int64_t cr0;
268 	int cpu, gsel_tss, x;
269 
270 	/* Set by the startup code for us to use */
271 	cpu = bootAP;
272 
273 	/* Update microcode before doing anything else. */
274 	ucode_load_ap(cpu);
275 
 276 	/* Get a pointer to this CPU's per-cpu data and save it. */
277 	pc = &__pcpu[cpu];
278 
279 	/* prime data page for it to use */
280 	pcpu_init(pc, cpu, sizeof(struct pcpu));
281 	dpcpu_init(dpcpu, cpu);
282 	pc->pc_apic_id = cpu_apic_ids[cpu];
283 	pc->pc_prvspace = pc;
284 	pc->pc_curthread = 0;
285 	pc->pc_tssp = &pc->pc_common_tss;
286 	pc->pc_rsp0 = 0;
287 	pc->pc_pti_rsp0 = (((vm_offset_t)&pc->pc_pti_stack +
288 	    PC_PTI_STACK_SZ * sizeof(uint64_t)) & ~0xful);
289 	gdt = pc->pc_gdt;
290 	pc->pc_tss = (struct system_segment_descriptor *)&gdt[GPROC0_SEL];
291 	pc->pc_fs32p = &gdt[GUFS32_SEL];
292 	pc->pc_gs32p = &gdt[GUGS32_SEL];
293 	pc->pc_ldt = (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL];
294 	pc->pc_ucr3_load_mask = PMAP_UCR3_NOMASK;
295 	/* See comment in pmap_bootstrap(). */
296 	pc->pc_pcid_next = PMAP_PCID_KERN + 2;
297 	pc->pc_pcid_gen = 1;
298 
299 	pc->pc_smp_tlb_gen = 1;
300 
301 	/* Init tss */
302 	pc->pc_common_tss = __pcpu[0].pc_common_tss;
303 	pc->pc_common_tss.tss_iobase = sizeof(struct amd64tss) +
304 	    IOPERM_BITMAP_SIZE;
305 	pc->pc_common_tss.tss_rsp0 = 0;
306 
307 	/* The doublefault stack runs on IST1. */
308 	np = ((struct nmi_pcpu *)&doublefault_stack[PAGE_SIZE]) - 1;
309 	np->np_pcpu = (register_t)pc;
310 	pc->pc_common_tss.tss_ist1 = (long)np;
311 
312 	/* The NMI stack runs on IST2. */
313 	np = ((struct nmi_pcpu *) &nmi_stack[PAGE_SIZE]) - 1;
314 	np->np_pcpu = (register_t)pc;
315 	pc->pc_common_tss.tss_ist2 = (long)np;
316 
317 	/* The MC# stack runs on IST3. */
318 	np = ((struct nmi_pcpu *) &mce_stack[PAGE_SIZE]) - 1;
319 	np->np_pcpu = (register_t)pc;
320 	pc->pc_common_tss.tss_ist3 = (long)np;
321 
322 	/* The DB# stack runs on IST4. */
323 	np = ((struct nmi_pcpu *) &dbg_stack[PAGE_SIZE]) - 1;
324 	np->np_pcpu = (register_t)pc;
325 	pc->pc_common_tss.tss_ist4 = (long)np;
326 
327 	/* Prepare private GDT */
328 	gdt_segs[GPROC0_SEL].ssd_base = (long)&pc->pc_common_tss;
329 	for (x = 0; x < NGDT; x++) {
330 		if (x != GPROC0_SEL && x != GPROC0_SEL + 1 &&
331 		    x != GUSERLDT_SEL && x != GUSERLDT_SEL + 1)
332 			ssdtosd(&gdt_segs[x], &gdt[x]);
333 	}
334 	ssdtosyssd(&gdt_segs[GPROC0_SEL],
335 	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);
336 	ap_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
337 	ap_gdt.rd_base = (u_long)gdt;
338 	lgdt(&ap_gdt);			/* does magic intra-segment return */
339 
340 	wrmsr(MSR_FSBASE, 0);		/* User value */
341 	wrmsr(MSR_GSBASE, (u_int64_t)pc);
342 	wrmsr(MSR_KGSBASE, (u_int64_t)pc);	/* XXX User value while we're in the kernel */
343 	fix_cpuid();
344 
345 	lidt(&r_idt);
346 
347 	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
348 	ltr(gsel_tss);
349 
350 	/*
351 	 * Set to a known state:
352 	 * Set by mpboot.s: CR0_PG, CR0_PE
353 	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
354 	 */
355 	cr0 = rcr0();
356 	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
357 	load_cr0(cr0);
358 
359 	amd64_conf_fast_syscall();
360 
361 	/* signal our startup to the BSP. */
362 	mp_naps++;
363 
 364 	/* Spin until the BSP releases the APs. */
365 	while (atomic_load_acq_int(&aps_ready) == 0)
366 		ia32_pause();
367 
368 	init_secondary_tail();
369 }
370 
371 /*******************************************************************
372  * local functions and data
373  */
374 
375 #ifdef NUMA
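/*
 * Copy the BSP-allocated pcpu area for the given CPU onto a page allocated
 * from its home NUMA domain, unless the existing backing page already
 * belongs to that domain.  The old backing page is leaked (see the XXX
 * note below).
 */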
376 static void
377 mp_realloc_pcpu(int cpuid, int domain)
378 {
379 	vm_page_t m;
380 	vm_offset_t oa, na;
381 
382 	oa = (vm_offset_t)&__pcpu[cpuid];
383 	if (_vm_phys_domain(pmap_kextract(oa)) == domain)
384 		return;
385 	m = vm_page_alloc_domain(NULL, 0, domain,
386 	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ);
387 	if (m == NULL)
388 		return;
389 	na = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
390 	pagecopy((void *)oa, (void *)na);
391 	pmap_qenter((vm_offset_t)&__pcpu[cpuid], &m, 1);
392 	/* XXX old pcpu page leaked. */
393 }
394 #endif
395 
396 /*
 397  * Start each AP in our list.
398  */
399 int
400 native_start_all_aps(void)
401 {
402 	u_int64_t *pt5, *pt4, *pt3, *pt2;
403 	u_int32_t mpbioswarmvec;
404 	int apic_id, cpu, domain, i, xo;
405 	u_char mpbiosreason;
406 
407 	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
408 
409 	/* copy the AP 1st level boot code */
410 	bcopy(mptramp_start, (void *)PHYS_TO_DMAP(boot_address), bootMP_size);
411 
 412 	/* Locate the page tables; they'll be below the trampoline. */
413 	if (la57) {
414 		pt5 = (uint64_t *)PHYS_TO_DMAP(mptramp_pagetables);
415 		xo = 1;
416 	} else {
417 		xo = 0;
418 	}
419 	pt4 = (uint64_t *)PHYS_TO_DMAP(mptramp_pagetables + xo * PAGE_SIZE);
420 	pt3 = pt4 + (PAGE_SIZE) / sizeof(u_int64_t);
421 	pt2 = pt3 + (PAGE_SIZE) / sizeof(u_int64_t);
422 
423 	/* Create the initial 1GB replicated page tables */
424 	for (i = 0; i < 512; i++) {
425 		if (la57) {
426 			pt5[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables +
427 			    PAGE_SIZE);
428 			pt5[i] |= PG_V | PG_RW | PG_U;
429 		}
430 
431 		/*
432 		 * Each slot of the level 4 pages points to the same
433 		 * level 3 page.
434 		 */
435 		pt4[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables +
436 		    (xo + 1) * PAGE_SIZE);
437 		pt4[i] |= PG_V | PG_RW | PG_U;
438 
439 		/*
440 		 * Each slot of the level 3 pages points to the same
441 		 * level 2 page.
442 		 */
443 		pt3[i] = (u_int64_t)(uintptr_t)(mptramp_pagetables +
444 		    ((xo + 2) * PAGE_SIZE));
445 		pt3[i] |= PG_V | PG_RW | PG_U;
446 
447 		/* The level 2 page slots are mapped with 2MB pages for 1GB. */
448 		pt2[i] = i * (2 * 1024 * 1024);
449 		pt2[i] |= PG_V | PG_RW | PG_PS | PG_U;
450 	}
451 
452 	/* save the current value of the warm-start vector */
453 	mpbioswarmvec = *((u_int32_t *) WARMBOOT_OFF);
454 	outb(CMOS_REG, BIOS_RESET);
455 	mpbiosreason = inb(CMOS_DATA);
456 
 457 	/* set up a vector to our boot code */
458 	*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
459 	*((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);
460 	outb(CMOS_REG, BIOS_RESET);
461 	outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
462 
463 	/* Relocate pcpu areas to the correct domain. */
464 #ifdef NUMA
465 	if (vm_ndomains > 1)
466 		for (cpu = 1; cpu < mp_ncpus; cpu++) {
467 			apic_id = cpu_apic_ids[cpu];
468 			domain = acpi_pxm_get_cpu_locality(apic_id);
469 			mp_realloc_pcpu(cpu, domain);
470 		}
471 #endif
472 
473 	/* start each AP */
474 	domain = 0;
475 	for (cpu = 1; cpu < mp_ncpus; cpu++) {
476 		apic_id = cpu_apic_ids[cpu];
477 #ifdef NUMA
478 		if (vm_ndomains > 1)
479 			domain = acpi_pxm_get_cpu_locality(apic_id);
480 #endif
481 		/* allocate and set up an idle stack data page */
482 		bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
483 		    M_WAITOK | M_ZERO);
484 		doublefault_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK |
485 		    M_ZERO);
486 		mce_stack = (char *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO);
487 		nmi_stack = (char *)kmem_malloc_domainset(
488 		    DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
489 		dbg_stack = (char *)kmem_malloc_domainset(
490 		    DOMAINSET_PREF(domain), PAGE_SIZE, M_WAITOK | M_ZERO);
491 		dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
492 		    DPCPU_SIZE, M_WAITOK | M_ZERO);
493 
494 		bootSTK = (char *)bootstacks[cpu] +
495 		    kstack_pages * PAGE_SIZE - 8;
496 		bootAP = cpu;
497 
498 		/* attempt to start the Application Processor */
499 		if (!start_ap(apic_id)) {
500 			/* restore the warmstart vector */
501 			*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
502 			panic("AP #%d (PHY# %d) failed!", cpu, apic_id);
503 		}
504 
505 		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
506 	}
507 
508 	/* restore the warmstart vector */
509 	*(u_int32_t *) WARMBOOT_OFF = mpbioswarmvec;
510 
511 	outb(CMOS_REG, BIOS_RESET);
512 	outb(CMOS_DATA, mpbiosreason);
513 
514 	/* number of APs actually started */
515 	return (mp_naps);
516 }
517 
518 /*
519  * This function starts the AP (application processor) identified
 520  * by the APIC ID 'apic_id'.  It does quite a "song and dance"
521  * to accomplish this.  This is necessary because of the nuances
522  * of the different hardware we might encounter.  It isn't pretty,
523  * but it seems to work.
524  */
525 static int
526 start_ap(int apic_id)
527 {
528 	int vector, ms;
529 	int cpus;
530 
531 	/* calculate the vector */
532 	vector = (boot_address >> 12) & 0xff;
533 
534 	/* used as a watchpoint to signal AP startup */
535 	cpus = mp_naps;
536 
537 	ipi_startup(apic_id, vector);
538 
539 	/* Wait up to 5 seconds for it to start. */
540 	for (ms = 0; ms < 5000; ms++) {
541 		if (mp_naps > cpus)
 542 			return (1);	/* return SUCCESS */
543 		DELAY(1000);
544 	}
 545 	return (0);		/* return FAILURE */
546 }
547 
548 /*
 549  * Flush the TLB on other CPUs.
550  */
551 
552 /*
 553  * Invalidation request.  The PCPU field pc_smp_tlb_op uses u_int instead
 554  * of the enum type to avoid both namespace and ABI issues.
555  */
556 enum invl_op_codes {
557       INVL_OP_TLB		= 1,
558       INVL_OP_TLB_INVPCID	= 2,
559       INVL_OP_TLB_INVPCID_PTI	= 3,
560       INVL_OP_TLB_PCID		= 4,
561       INVL_OP_PGRNG		= 5,
562       INVL_OP_PGRNG_INVPCID	= 6,
563       INVL_OP_PGRNG_PCID	= 7,
564       INVL_OP_PG		= 8,
565       INVL_OP_PG_INVPCID	= 9,
566       INVL_OP_PG_PCID		= 10,
567       INVL_OP_CACHE		= 11,
568 };
569 
570 /*
571  * These variables are initialized at startup to reflect how each of
572  * the different kinds of invalidations should be performed on the
573  * current machine and environment.
574  */
575 static enum invl_op_codes invl_op_tlb;
576 static enum invl_op_codes invl_op_pgrng;
577 static enum invl_op_codes invl_op_pg;
578 
579 /*
580  * Scoreboard of IPI completion notifications from target to IPI initiator.
581  *
 582  * Each CPU can initiate a shootdown IPI independently of the other CPUs.
 583  * The initiator enters a critical section, fills its local PCPU shootdown
 584  * info (the pc_smp_tlb_* variables), and then clears the scoreboard
 585  * generation at location (cpu, my_cpuid) for each target CPU.  An IPI is
 586  * then sent to all targets, which scan for zeroed scoreboard generation
 587  * words.  Upon finding such a word, a target reads the shootdown data from
 588  * the corresponding CPU's pcpu area and sets the generation.  Meanwhile,
 589  * the initiator loops until every generation it zeroed has been updated.
590  */
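/*
 * The scoreboard is an (mp_maxid + 1) x (mp_maxid + 1) array of generation
 * words; the slot for a given (target, initiator) pair is
 * invl_scoreboard[target * (mp_maxid + 1) + initiator], see
 * invl_scoreboard_getcpu() and invl_scoreboard_slot() below.
 */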
591 static uint32_t *invl_scoreboard;
592 
593 static void
594 invl_scoreboard_init(void *arg __unused)
595 {
596 	u_int i;
597 
598 	invl_scoreboard = malloc(sizeof(uint32_t) * (mp_maxid + 1) *
599 	    (mp_maxid + 1), M_DEVBUF, M_WAITOK);
600 	for (i = 0; i < (mp_maxid + 1) * (mp_maxid + 1); i++)
601 		invl_scoreboard[i] = 1;
602 
603 	if (pmap_pcid_enabled) {
604 		if (invpcid_works) {
605 			if (pti)
606 				invl_op_tlb = INVL_OP_TLB_INVPCID_PTI;
607 			else
608 				invl_op_tlb = INVL_OP_TLB_INVPCID;
609 			invl_op_pgrng = INVL_OP_PGRNG_INVPCID;
610 			invl_op_pg = INVL_OP_PG_INVPCID;
611 		} else {
612 			invl_op_tlb = INVL_OP_TLB_PCID;
613 			invl_op_pgrng = INVL_OP_PGRNG_PCID;
614 			invl_op_pg = INVL_OP_PG_PCID;
615 		}
616 	} else {
617 		invl_op_tlb = INVL_OP_TLB;
618 		invl_op_pgrng = INVL_OP_PGRNG;
619 		invl_op_pg = INVL_OP_PG;
620 	}
621 }
622 SYSINIT(invl_ops, SI_SUB_SMP, SI_ORDER_FIRST, invl_scoreboard_init, NULL);
623 
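/* Return the row of scoreboard slots belonging to the given target CPU. */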
624 static uint32_t *
625 invl_scoreboard_getcpu(u_int cpu)
626 {
627 	return (invl_scoreboard + cpu * (mp_maxid + 1));
628 }
629 
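/* Return the calling (initiator) CPU's slot within the target CPU's row. */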
630 static uint32_t *
631 invl_scoreboard_slot(u_int cpu)
632 {
633 	return (invl_scoreboard_getcpu(cpu) + PCPU_GET(cpuid));
634 }
635 
636 /*
637  * Used by pmap to request cache or TLB invalidation on local and
638  * remote processors.  Mask provides the set of remote CPUs which are
639  * to be signalled with the invalidation IPI.  As an optimization, the
640  * curcpu_cb callback is invoked on the calling CPU while waiting for
641  * remote CPUs to complete the operation.
642  *
643  * The callback function is called unconditionally on the caller's
644  * underlying processor, even when this processor is not set in the
645  * mask.  So, the callback function must be prepared to handle such
646  * spurious invocations.
647  *
 648  * Interrupts must be enabled when calling this function after SMP has
 649  * started, to avoid deadlocking with other IPIs that are protected by the
 650  * smp_ipi_mtx spinlock on the initiator side.
651  */
652 static void
653 smp_targeted_tlb_shootdown(cpuset_t mask, pmap_t pmap, vm_offset_t addr1,
654     vm_offset_t addr2, smp_invl_cb_t curcpu_cb, enum invl_op_codes op)
655 {
656 	cpuset_t other_cpus, mask1;
657 	uint32_t generation, *p_cpudone;
658 	int cpu;
659 
660 	/*
661 	 * It is not necessary to signal other CPUs while booting or
662 	 * when in the debugger.
663 	 */
664 	if (kdb_active || KERNEL_PANICKED() || !smp_started) {
665 		curcpu_cb(pmap, addr1, addr2);
666 		return;
667 	}
668 
669 	sched_pin();
670 
671 	/*
672 	 * Check for other cpus.  Return if none.
673 	 */
674 	if (CPU_ISFULLSET(&mask)) {
675 		if (mp_ncpus <= 1)
676 			goto nospinexit;
677 	} else {
678 		CPU_CLR(PCPU_GET(cpuid), &mask);
679 		if (CPU_EMPTY(&mask))
680 			goto nospinexit;
681 	}
682 
683 	/*
 684 	 * The initiator must have interrupts enabled, which prevents
 685 	 * non-invalidation IPIs that take the smp_ipi_mtx spinlock
 686 	 * from deadlocking with us.  On the other hand, preemption
 687 	 * must be disabled to pin the initiator to this instance of
 688 	 * the pcpu pc_smp_tlb data and scoreboard line.
689 	 */
690 	KASSERT((read_rflags() & PSL_I) != 0,
691 	    ("smp_targeted_tlb_shootdown: interrupts disabled"));
692 	critical_enter();
693 
694 	PCPU_SET(smp_tlb_addr1, addr1);
695 	PCPU_SET(smp_tlb_addr2, addr2);
696 	PCPU_SET(smp_tlb_pmap, pmap);
697 	generation = PCPU_GET(smp_tlb_gen);
698 	if (++generation == 0)
699 		generation = 1;
700 	PCPU_SET(smp_tlb_gen, generation);
701 	PCPU_SET(smp_tlb_op, op);
702 	/* Fence between filling smp_tlb fields and clearing scoreboard. */
703 	atomic_thread_fence_rel();
704 
705 	mask1 = mask;
706 	while ((cpu = CPU_FFS(&mask1)) != 0) {
707 		cpu--;
708 		CPU_CLR(cpu, &mask1);
709 		KASSERT(*invl_scoreboard_slot(cpu) != 0,
710 		    ("IPI scoreboard is zero, initiator %d target %d",
711 		    PCPU_GET(cpuid), cpu));
712 		*invl_scoreboard_slot(cpu) = 0;
713 	}
714 
715 	/*
716 	 * IPI acts as a fence between writing to the scoreboard above
717 	 * (zeroing slot) and reading from it below (wait for
718 	 * acknowledgment).
719 	 */
720 	if (CPU_ISFULLSET(&mask)) {
721 		ipi_all_but_self(IPI_INVLOP);
722 		other_cpus = all_cpus;
723 		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
724 	} else {
725 		other_cpus = mask;
726 		ipi_selected(mask, IPI_INVLOP);
727 	}
728 	curcpu_cb(pmap, addr1, addr2);
729 	while ((cpu = CPU_FFS(&other_cpus)) != 0) {
730 		cpu--;
731 		CPU_CLR(cpu, &other_cpus);
732 		p_cpudone = invl_scoreboard_slot(cpu);
733 		while (atomic_load_int(p_cpudone) != generation)
734 			ia32_pause();
735 	}
736 	critical_exit();
737 	sched_unpin();
738 	return;
739 
740 nospinexit:
741 	curcpu_cb(pmap, addr1, addr2);
742 	sched_unpin();
743 }
744 
745 void
746 smp_masked_invltlb(cpuset_t mask, pmap_t pmap, smp_invl_cb_t curcpu_cb)
747 {
748 	smp_targeted_tlb_shootdown(mask, pmap, 0, 0, curcpu_cb, invl_op_tlb);
749 #ifdef COUNT_XINVLTLB_HITS
750 	ipi_global++;
751 #endif
752 }
753 
754 void
755 smp_masked_invlpg(cpuset_t mask, vm_offset_t addr, pmap_t pmap,
756     smp_invl_cb_t curcpu_cb)
757 {
758 	smp_targeted_tlb_shootdown(mask, pmap, addr, 0, curcpu_cb, invl_op_pg);
759 #ifdef COUNT_XINVLTLB_HITS
760 	ipi_page++;
761 #endif
762 }
763 
764 void
765 smp_masked_invlpg_range(cpuset_t mask, vm_offset_t addr1, vm_offset_t addr2,
766     pmap_t pmap, smp_invl_cb_t curcpu_cb)
767 {
768 	smp_targeted_tlb_shootdown(mask, pmap, addr1, addr2, curcpu_cb,
769 	    invl_op_pgrng);
770 #ifdef COUNT_XINVLTLB_HITS
771 	ipi_range++;
772 	ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
773 #endif
774 }
775 
776 void
777 smp_cache_flush(smp_invl_cb_t curcpu_cb)
778 {
779 	smp_targeted_tlb_shootdown(all_cpus, NULL, 0, 0, curcpu_cb,
780 	    INVL_OP_CACHE);
781 }
782 
783 /*
784  * Handlers for TLB related IPIs
785  */
786 static void
787 invltlb_handler(pmap_t smp_tlb_pmap)
788 {
789 #ifdef COUNT_XINVLTLB_HITS
790 	xhits_gbl[PCPU_GET(cpuid)]++;
791 #endif /* COUNT_XINVLTLB_HITS */
792 #ifdef COUNT_IPIS
793 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
794 #endif /* COUNT_IPIS */
795 
796 	if (smp_tlb_pmap == kernel_pmap)
797 		invltlb_glob();
798 	else
799 		invltlb();
800 }
801 
802 static void
803 invltlb_invpcid_handler(pmap_t smp_tlb_pmap)
804 {
805 	struct invpcid_descr d;
806 
807 #ifdef COUNT_XINVLTLB_HITS
808 	xhits_gbl[PCPU_GET(cpuid)]++;
809 #endif /* COUNT_XINVLTLB_HITS */
810 #ifdef COUNT_IPIS
811 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
812 #endif /* COUNT_IPIS */
813 
814 	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
815 	d.pad = 0;
816 	d.addr = 0;
817 	invpcid(&d, smp_tlb_pmap == kernel_pmap ? INVPCID_CTXGLOB :
818 	    INVPCID_CTX);
819 }
820 
821 static void
822 invltlb_invpcid_pti_handler(pmap_t smp_tlb_pmap)
823 {
824 	struct invpcid_descr d;
825 
826 #ifdef COUNT_XINVLTLB_HITS
827 	xhits_gbl[PCPU_GET(cpuid)]++;
828 #endif /* COUNT_XINVLTLB_HITS */
829 #ifdef COUNT_IPIS
830 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
831 #endif /* COUNT_IPIS */
832 
833 	d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
834 	d.pad = 0;
835 	d.addr = 0;
836 	if (smp_tlb_pmap == kernel_pmap) {
837 		/*
838 		 * This invalidation actually needs to clear kernel
839 		 * mappings from the TLB in the current pmap, but
840 		 * since we were asked for the flush in the kernel
 841 		 * pmap, we achieve it by performing a global flush.
842 		 */
843 		invpcid(&d, INVPCID_CTXGLOB);
844 	} else {
845 		invpcid(&d, INVPCID_CTX);
846 		if (smp_tlb_pmap == PCPU_GET(curpmap))
847 			PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
848 	}
849 }
850 
851 static void
852 invltlb_pcid_handler(pmap_t smp_tlb_pmap)
853 {
854 	uint32_t pcid;
855 
856 #ifdef COUNT_XINVLTLB_HITS
857 	xhits_gbl[PCPU_GET(cpuid)]++;
858 #endif /* COUNT_XINVLTLB_HITS */
859 #ifdef COUNT_IPIS
860 	(*ipi_invltlb_counts[PCPU_GET(cpuid)])++;
861 #endif /* COUNT_IPIS */
862 
863 	if (smp_tlb_pmap == kernel_pmap) {
864 		invltlb_glob();
865 	} else {
866 		/*
867 		 * The current pmap might not be equal to
868 		 * smp_tlb_pmap.  The clearing of the pm_gen in
869 		 * pmap_invalidate_all() takes care of TLB
870 		 * invalidation when switching to the pmap on this
871 		 * CPU.
872 		 */
873 		if (smp_tlb_pmap == PCPU_GET(curpmap)) {
874 			pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
875 			load_cr3(smp_tlb_pmap->pm_cr3 | pcid);
876 			if (smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3)
877 				PCPU_SET(ucr3_load_mask, ~CR3_PCID_SAVE);
878 		}
879 	}
880 }
881 
882 static void
883 invlpg_handler(vm_offset_t smp_tlb_addr1)
884 {
885 #ifdef COUNT_XINVLTLB_HITS
886 	xhits_pg[PCPU_GET(cpuid)]++;
887 #endif /* COUNT_XINVLTLB_HITS */
888 #ifdef COUNT_IPIS
889 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
890 #endif /* COUNT_IPIS */
891 
892 	invlpg(smp_tlb_addr1);
893 }
894 
895 static void
896 invlpg_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
897 {
898 	struct invpcid_descr d;
899 
900 #ifdef COUNT_XINVLTLB_HITS
901 	xhits_pg[PCPU_GET(cpuid)]++;
902 #endif /* COUNT_XINVLTLB_HITS */
903 #ifdef COUNT_IPIS
904 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
905 #endif /* COUNT_IPIS */
906 
907 	invlpg(smp_tlb_addr1);
908 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
909 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
910 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
911 		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
912 		    PMAP_PCID_USER_PT;
913 		d.pad = 0;
914 		d.addr = smp_tlb_addr1;
915 		invpcid(&d, INVPCID_ADDR);
916 	}
917 }
918 
919 static void
920 invlpg_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1)
921 {
922 	uint64_t kcr3, ucr3;
923 	uint32_t pcid;
924 
925 #ifdef COUNT_XINVLTLB_HITS
926 	xhits_pg[PCPU_GET(cpuid)]++;
927 #endif /* COUNT_XINVLTLB_HITS */
928 #ifdef COUNT_IPIS
929 	(*ipi_invlpg_counts[PCPU_GET(cpuid)])++;
930 #endif /* COUNT_IPIS */
931 
932 	invlpg(smp_tlb_addr1);
933 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
934 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
935 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
936 		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
937 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
938 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
939 		pmap_pti_pcid_invlpg(ucr3, kcr3, smp_tlb_addr1);
940 	}
941 }
942 
943 static void
944 invlrng_handler(vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
945 {
946 	vm_offset_t addr, addr2;
947 
948 #ifdef COUNT_XINVLTLB_HITS
949 	xhits_rng[PCPU_GET(cpuid)]++;
950 #endif /* COUNT_XINVLTLB_HITS */
951 #ifdef COUNT_IPIS
952 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
953 #endif /* COUNT_IPIS */
954 
955 	addr = smp_tlb_addr1;
956 	addr2 = smp_tlb_addr2;
957 	do {
958 		invlpg(addr);
959 		addr += PAGE_SIZE;
960 	} while (addr < addr2);
961 }
962 
963 static void
964 invlrng_invpcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
965     vm_offset_t smp_tlb_addr2)
966 {
967 	struct invpcid_descr d;
968 	vm_offset_t addr, addr2;
969 
970 #ifdef COUNT_XINVLTLB_HITS
971 	xhits_rng[PCPU_GET(cpuid)]++;
972 #endif /* COUNT_XINVLTLB_HITS */
973 #ifdef COUNT_IPIS
974 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
975 #endif /* COUNT_IPIS */
976 
977 	addr = smp_tlb_addr1;
978 	addr2 = smp_tlb_addr2;
979 	do {
980 		invlpg(addr);
981 		addr += PAGE_SIZE;
982 	} while (addr < addr2);
983 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
984 	    smp_tlb_pmap->pm_ucr3 != PMAP_NO_CR3 &&
985 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
986 		d.pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid |
987 		    PMAP_PCID_USER_PT;
988 		d.pad = 0;
989 		d.addr = smp_tlb_addr1;
990 		do {
991 			invpcid(&d, INVPCID_ADDR);
992 			d.addr += PAGE_SIZE;
993 		} while (d.addr < addr2);
994 	}
995 }
996 
997 static void
998 invlrng_pcid_handler(pmap_t smp_tlb_pmap, vm_offset_t smp_tlb_addr1,
999     vm_offset_t smp_tlb_addr2)
1000 {
1001 	vm_offset_t addr, addr2;
1002 	uint64_t kcr3, ucr3;
1003 	uint32_t pcid;
1004 
1005 #ifdef COUNT_XINVLTLB_HITS
1006 	xhits_rng[PCPU_GET(cpuid)]++;
1007 #endif /* COUNT_XINVLTLB_HITS */
1008 #ifdef COUNT_IPIS
1009 	(*ipi_invlrng_counts[PCPU_GET(cpuid)])++;
1010 #endif /* COUNT_IPIS */
1011 
1012 	addr = smp_tlb_addr1;
1013 	addr2 = smp_tlb_addr2;
1014 	do {
1015 		invlpg(addr);
1016 		addr += PAGE_SIZE;
1017 	} while (addr < addr2);
1018 	if (smp_tlb_pmap == PCPU_GET(curpmap) &&
1019 	    (ucr3 = smp_tlb_pmap->pm_ucr3) != PMAP_NO_CR3 &&
1020 	    PCPU_GET(ucr3_load_mask) == PMAP_UCR3_NOMASK) {
1021 		pcid = smp_tlb_pmap->pm_pcids[PCPU_GET(cpuid)].pm_pcid;
1022 		kcr3 = smp_tlb_pmap->pm_cr3 | pcid | CR3_PCID_SAVE;
1023 		ucr3 |= pcid | PMAP_PCID_USER_PT | CR3_PCID_SAVE;
1024 		pmap_pti_pcid_invlrng(ucr3, kcr3, smp_tlb_addr1, addr2);
1025 	}
1026 }
1027 
1028 static void
1029 invlcache_handler(void)
1030 {
1031 #ifdef COUNT_IPIS
1032 	(*ipi_invlcache_counts[PCPU_GET(cpuid)])++;
1033 #endif /* COUNT_IPIS */
1034 	wbinvd();
1035 }
1036 
1037 static void
1038 invlop_handler_one_req(enum invl_op_codes smp_tlb_op, pmap_t smp_tlb_pmap,
1039     vm_offset_t smp_tlb_addr1, vm_offset_t smp_tlb_addr2)
1040 {
1041 	switch (smp_tlb_op) {
1042 	case INVL_OP_TLB:
1043 		invltlb_handler(smp_tlb_pmap);
1044 		break;
1045 	case INVL_OP_TLB_INVPCID:
1046 		invltlb_invpcid_handler(smp_tlb_pmap);
1047 		break;
1048 	case INVL_OP_TLB_INVPCID_PTI:
1049 		invltlb_invpcid_pti_handler(smp_tlb_pmap);
1050 		break;
1051 	case INVL_OP_TLB_PCID:
1052 		invltlb_pcid_handler(smp_tlb_pmap);
1053 		break;
1054 	case INVL_OP_PGRNG:
1055 		invlrng_handler(smp_tlb_addr1, smp_tlb_addr2);
1056 		break;
1057 	case INVL_OP_PGRNG_INVPCID:
1058 		invlrng_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1059 		    smp_tlb_addr2);
1060 		break;
1061 	case INVL_OP_PGRNG_PCID:
1062 		invlrng_pcid_handler(smp_tlb_pmap, smp_tlb_addr1,
1063 		    smp_tlb_addr2);
1064 		break;
1065 	case INVL_OP_PG:
1066 		invlpg_handler(smp_tlb_addr1);
1067 		break;
1068 	case INVL_OP_PG_INVPCID:
1069 		invlpg_invpcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1070 		break;
1071 	case INVL_OP_PG_PCID:
1072 		invlpg_pcid_handler(smp_tlb_pmap, smp_tlb_addr1);
1073 		break;
1074 	case INVL_OP_CACHE:
1075 		invlcache_handler();
1076 		break;
1077 	default:
1078 		__assert_unreachable();
1079 		break;
1080 	}
1081 }
1082 
1083 void
1084 invlop_handler(void)
1085 {
1086 	struct pcpu *initiator_pc;
1087 	pmap_t smp_tlb_pmap;
1088 	vm_offset_t smp_tlb_addr1, smp_tlb_addr2;
1089 	u_int initiator_cpu_id;
1090 	enum invl_op_codes smp_tlb_op;
1091 	uint32_t *scoreboard, smp_tlb_gen;
1092 
1093 	scoreboard = invl_scoreboard_getcpu(PCPU_GET(cpuid));
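	/*
	 * Scan our row of the scoreboard for slots zeroed by initiators,
	 * service each request, and acknowledge it by storing the
	 * initiator's generation; repeat until no zeroed slots remain.
	 */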
1094 	for (;;) {
1095 		for (initiator_cpu_id = 0; initiator_cpu_id <= mp_maxid;
1096 		    initiator_cpu_id++) {
1097 			if (scoreboard[initiator_cpu_id] == 0)
1098 				break;
1099 		}
1100 		if (initiator_cpu_id > mp_maxid)
1101 			break;
1102 		initiator_pc = cpuid_to_pcpu[initiator_cpu_id];
1103 
1104 		/*
 1105 		 * This acquire fence and its corresponding release
 1106 		 * fence in smp_targeted_tlb_shootdown() sit between
 1107 		 * reading the zeroed scoreboard slot and accessing the
 1108 		 * initiator's PCPU for the pc_smp_tlb values.
1109 		 */
1110 		atomic_thread_fence_acq();
1111 		smp_tlb_pmap = initiator_pc->pc_smp_tlb_pmap;
1112 		smp_tlb_addr1 = initiator_pc->pc_smp_tlb_addr1;
1113 		smp_tlb_addr2 = initiator_pc->pc_smp_tlb_addr2;
1114 		smp_tlb_op = initiator_pc->pc_smp_tlb_op;
1115 		smp_tlb_gen = initiator_pc->pc_smp_tlb_gen;
1116 
1117 		/*
1118 		 * Ensure that we do not make our scoreboard
1119 		 * notification visible to the initiator until the
1120 		 * pc_smp_tlb values are read.  The corresponding
1121 		 * fence is implicitly provided by the barrier in the
1122 		 * IPI send operation before the APIC ICR register
1123 		 * write.
1124 		 *
1125 		 * As an optimization, the request is acknowledged
1126 		 * before the actual invalidation is performed.  It is
 1127 		 * safe because the target CPU cannot return to userspace
 1128 		 * before the handler finishes.  Only an NMI can preempt the
 1129 		 * handler, but an NMI would see the kernel handler frame and
 1130 		 * would not touch the not-yet-invalidated user page tables.
1131 		 */
1132 		atomic_thread_fence_acq();
1133 		atomic_store_int(&scoreboard[initiator_cpu_id], smp_tlb_gen);
1134 
1135 		invlop_handler_one_req(smp_tlb_op, smp_tlb_pmap, smp_tlb_addr1,
1136 		    smp_tlb_addr2);
1137 	}
1138 }
1139