/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/


#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/signal.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/sysinfo.h>
#include <sys/var.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/inline.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <sys/bitmap.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/cpupart.h>
#include <sys/lgrp.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/bitset.h>
#include <sys/schedctl.h>
#include <sys/atomic.h>
#include <sys/dtrace.h>
#include <sys/sdt.h>
#include <sys/archsystm.h>
#include <sys/smt.h>

#include <vm/as.h>

#define	BOUND_CPU	0x1
#define	BOUND_PARTITION	0x2
#define	BOUND_INTR	0x4

/* Dispatch queue allocation structure and functions */
struct disp_queue_info {
	disp_t	*dp;
	dispq_t	*olddispq;
	dispq_t	*newdispq;
	ulong_t	*olddqactmap;
	ulong_t	*newdqactmap;
	int	oldnglobpris;
};
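
/*
 * (Descriptive note: the old* fields remember the queue, bitmap and size
 * being replaced so that they can be freed once the CPUs restart; the
 * new* fields hold the pre-allocated replacements.  See disp_dq_alloc(),
 * disp_dq_assign() and disp_dq_free() below.)
 */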
static void	disp_dq_alloc(struct disp_queue_info *dptr, int numpris,
    disp_t *dp);
static void	disp_dq_assign(struct disp_queue_info *dptr, int numpris);
static void	disp_dq_free(struct disp_queue_info *dptr);

/* platform-specific routine to call when processor is idle */
static void	generic_idle_cpu();
void		(*idle_cpu)() = generic_idle_cpu;

/* routines invoked when a CPU enters/exits the idle loop */
static void	idle_enter();
static void	idle_exit();

/* platform-specific routine to call when thread is enqueued */
static void	generic_enq_thread(cpu_t *, int);
void		(*disp_enq_thread)(cpu_t *, int) = generic_enq_thread;

pri_t	kpreemptpri;		/* priority where kernel preemption applies */
pri_t	upreemptpri = 0;	/* priority where normal preemption applies */
pri_t	intr_pri;		/* interrupt thread priority base level */

#define	KPQPRI	-1		/* pri where cpu affinity is dropped for kpq */
pri_t	kpqpri = KPQPRI;	/* can be set in /etc/system */
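
/*
 * Illustrative only: like other dispatcher tunables, kpqpri can be
 * overridden from /etc/system, e.g. (example value, not a
 * recommendation):
 *
 *	set kpqpri = 100
 */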
disp_t	cpu0_disp;		/* boot CPU's dispatch queue */
disp_lock_t	swapped_lock;	/* lock swapped threads and swap queue */
int	nswapped;		/* total number of swapped threads */
void	disp_swapped_enq(kthread_t *tp);
static void	disp_swapped_setrun(kthread_t *tp);
static void	cpu_resched(cpu_t *cp, pri_t tpri);

/*
 * If this is set, only interrupt threads will cause kernel preemptions.
 * This is done by changing the value of kpreemptpri.  kpreemptpri
 * will either be the max sysclass pri or the min interrupt pri.
 */
int	only_intr_kpreempt;

extern void set_idle_cpu(int cpun);
extern void unset_idle_cpu(int cpun);
static void setkpdq(kthread_t *tp, int borf);
#define	SETKP_BACK	0
#define	SETKP_FRONT	1
/*
 * Parameter that determines how recently a thread must have run
 * on the CPU to be considered loosely-bound to that CPU to reduce
 * cold cache effects.  The interval is measured in clock ticks
 * (lbolt); see t_disp_time, which is updated in swtch().
 */
#define	RECHOOSE_INTERVAL 3
int	rechoose_interval = RECHOOSE_INTERVAL;

/*
 * Parameter that determines how long (in nanoseconds) a thread must
 * be sitting on a run queue before it can be stolen by another CPU
 * to reduce migrations.
 *
 * nosteal_nsec should be set by the platform code's
 * cmp_set_nosteal_interval() to an appropriate value.  It is set to
 * NOSTEAL_UNINITIALIZED here to indicate that it is uninitialized.
 * Setting nosteal_nsec to 0 effectively disables the nosteal
 * 'protection'.
 */
#define	NOSTEAL_UNINITIALIZED	(-1)
hrtime_t nosteal_nsec = NOSTEAL_UNINITIALIZED;
extern void cmp_set_nosteal_interval(void);
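
/*
 * Illustrative only: per the comment above, an administrator could
 * disable the nosteal protection from /etc/system with:
 *
 *	set nosteal_nsec = 0
 */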

id_t	defaultcid;	/* system "default" class; see dispadmin(8) */

disp_lock_t	transition_lock;	/* lock on transitioning threads */
disp_lock_t	stop_lock;		/* lock on stopped threads */

static void	cpu_dispqalloc(int numpris);

/*
 * This gets returned by disp_getwork/disp_getbest if we couldn't steal
 * a thread because it was sitting on its run queue for a very short
 * period of time.
 */
#define	T_DONTSTEAL	(kthread_t *)(-1) /* returned by disp_getwork/getbest */

static kthread_t	*disp_getwork(cpu_t *to);
static kthread_t	*disp_getbest(disp_t *from);
static kthread_t	*disp_ratify(kthread_t *tp, disp_t *kpq);

void	swtch_to(kthread_t *);

/*
 * dispatcher and scheduler initialization
 */

/*
 * disp_setup - Common code to calculate and allocate dispatcher
 *		variables and structures based on the maximum priority.
 */
static void
disp_setup(pri_t maxglobpri, pri_t oldnglobpris)
{
	pri_t	newnglobpris;

	ASSERT(MUTEX_HELD(&cpu_lock));

	newnglobpris = maxglobpri + 1 + LOCK_LEVEL;
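
	/*
	 * For example, assuming the stock global priority layout
	 * (TS/IA 0-59, SYS 60-99, RT 100-159), maxglobpri is 159 once
	 * the RT class is loaded, and adding 1 + LOCK_LEVEL (10) slots
	 * for interrupt threads yields 170 global priorities, 0-169.
	 */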

	if (newnglobpris > oldnglobpris) {
		/*
		 * Allocate new kp queues for each CPU partition.
		 */
		cpupart_kpqalloc(newnglobpris);

		/*
		 * Allocate new dispatch queues for each CPU.
		 */
		cpu_dispqalloc(newnglobpris);

		/*
		 * compute new interrupt thread base priority
		 */
		intr_pri = maxglobpri;
		if (only_intr_kpreempt) {
			kpreemptpri = intr_pri + 1;
			if (kpqpri == KPQPRI)
				kpqpri = kpreemptpri;
		}
		v.v_nglobpris = newnglobpris;
	}
}

/*
 * dispinit - Called to initialize all loaded classes and the
 *	      dispatcher framework.
 */
void
dispinit(void)
{
	id_t	cid;
	pri_t	maxglobpri;
	pri_t	cl_maxglobpri;

	maxglobpri = -1;

	/*
	 * Initialize transition lock, which will always be set.
	 */
	DISP_LOCK_INIT(&transition_lock);
	disp_lock_enter_high(&transition_lock);
	DISP_LOCK_INIT(&stop_lock);

	mutex_enter(&cpu_lock);
	CPU->cpu_disp->disp_maxrunpri = -1;
	CPU->cpu_disp->disp_max_unbound_pri = -1;

	/*
	 * Initialize the default CPU partition.
	 */
	cpupart_initialize_default();
	/*
	 * Call the class specific initialization functions for
	 * all pre-installed schedulers.
	 *
	 * We pass the size of a class specific parameter
	 * buffer to each of the initialization functions
	 * to try to catch problems with backward compatibility
	 * of class modules.
	 *
	 * For example a new class module running on an old system
	 * which didn't provide sufficiently large parameter buffers
	 * would be bad news. Class initialization modules can check for
	 * this and take action if they detect a problem.
	 */

	for (cid = 0; cid < nclass; cid++) {
		sclass_t *sc;

		sc = &sclass[cid];
		if (SCHED_INSTALLED(sc)) {
			cl_maxglobpri = sc->cl_init(cid, PC_CLPARMSZ,
			    &sc->cl_funcs);
			if (cl_maxglobpri > maxglobpri)
				maxglobpri = cl_maxglobpri;
		}
	}

	/*
	 * Historically, kpreemptpri was set to v_maxsyspri + 1 -- which is
	 * to say, maxclsyspri + 1.  However, over time, the system has used
	 * more and more asynchronous kernel threads, with an increasing number
	 * of these doing work on direct behalf of higher-level software (e.g.,
	 * network processing).  This has led to potential priority inversions:
	 * threads doing low-priority lengthy kernel work can effectively
	 * delay kernel-level processing of higher-priority data.  To minimize
	 * such inversions, we set kpreemptpri to be v_maxsyspri; anything in
	 * the kernel that runs at maxclsyspri will therefore induce kernel
	 * preemption, and this priority should be used if/when an asynchronous
	 * thread (or, as is often the case, task queue) is performing a task
	 * on behalf of higher-level software (or any task that is otherwise
	 * latency-sensitive).
	 */
	kpreemptpri = (pri_t)v.v_maxsyspri;
	if (kpqpri == KPQPRI)
		kpqpri = kpreemptpri;

	ASSERT(maxglobpri >= 0);
	disp_setup(maxglobpri, 0);

	mutex_exit(&cpu_lock);

	/*
	 * Platform specific sticky scheduler setup.
	 */
	if (nosteal_nsec == NOSTEAL_UNINITIALIZED)
		cmp_set_nosteal_interval();

	/*
	 * Get the default class ID; this may be later modified via
	 * dispadmin(8).  This will load the class (normally TS) and that will
	 * call disp_add(), which is why we had to drop cpu_lock first.
	 */
	if (getcid(defaultclass, &defaultcid) != 0) {
		cmn_err(CE_PANIC, "Couldn't load default scheduling class '%s'",
		    defaultclass);
	}
}

/*
 * disp_add - Called with class pointer to initialize the dispatcher
 *	      for a newly loaded class.
 */
void
disp_add(sclass_t *clp)
{
	pri_t	maxglobpri;
	pri_t	cl_maxglobpri;

	mutex_enter(&cpu_lock);
	/*
	 * Initialize the scheduler class.
	 */
	maxglobpri = (pri_t)(v.v_nglobpris - LOCK_LEVEL - 1);
	cl_maxglobpri = clp->cl_init(clp - sclass, PC_CLPARMSZ, &clp->cl_funcs);
	if (cl_maxglobpri > maxglobpri)
		maxglobpri = cl_maxglobpri;

	/*
	 * Save old queue information.  Since we're initializing a
	 * new scheduling class which has just been loaded, then
	 * the size of the dispq may have changed.  We need to handle
	 * that here.
	 */
	disp_setup(maxglobpri, v.v_nglobpris);

	mutex_exit(&cpu_lock);
}


/*
 * For each CPU, allocate new dispatch queues
 * with the stated number of priorities.
 */
static void
cpu_dispqalloc(int numpris)
{
	cpu_t	*cpup;
	struct disp_queue_info	*disp_mem;
	int i, num;

	ASSERT(MUTEX_HELD(&cpu_lock));

	disp_mem = kmem_zalloc(NCPU *
	    sizeof (struct disp_queue_info), KM_SLEEP);

	/*
	 * This routine must allocate all of the memory before stopping
	 * the cpus because it must not sleep in kmem_alloc while the
	 * CPUs are stopped.  Locks held by the stopped CPUs will not be
	 * released until they are restarted.
	 */
	i = 0;
	cpup = cpu_list;
	do {
		disp_dq_alloc(&disp_mem[i], numpris, cpup->cpu_disp);
		i++;
		cpup = cpup->cpu_next;
	} while (cpup != cpu_list);
	num = i;

	pause_cpus(NULL, NULL);
	for (i = 0; i < num; i++)
		disp_dq_assign(&disp_mem[i], numpris);
	start_cpus();

	/*
	 * All of the memory must be freed after restarting the cpus,
	 * because we cannot risk sleeping in kmem_free while the cpus
	 * are stopped.
	 */
	for (i = 0; i < num; i++)
		disp_dq_free(&disp_mem[i]);

	kmem_free(disp_mem, NCPU * sizeof (struct disp_queue_info));
}

static void
disp_dq_alloc(struct disp_queue_info *dptr, int numpris, disp_t *dp)
{
	dptr->newdispq = kmem_zalloc(numpris * sizeof (dispq_t), KM_SLEEP);
	dptr->newdqactmap = kmem_zalloc(((numpris / BT_NBIPUL) + 1) *
	    sizeof (long), KM_SLEEP);
	dptr->dp = dp;
}

static void
disp_dq_assign(struct disp_queue_info *dptr, int numpris)
{
	disp_t	*dp;

	dp = dptr->dp;
	dptr->olddispq = dp->disp_q;
	dptr->olddqactmap = dp->disp_qactmap;
	dptr->oldnglobpris = dp->disp_npri;

	ASSERT(dptr->oldnglobpris < numpris);

	if (dptr->olddispq != NULL) {
		/*
		 * Use kcopy because bcopy is platform-specific
		 * and could block while we might have paused the cpus.
		 */
		(void) kcopy(dptr->olddispq, dptr->newdispq,
		    dptr->oldnglobpris * sizeof (dispq_t));
		(void) kcopy(dptr->olddqactmap, dptr->newdqactmap,
		    ((dptr->oldnglobpris / BT_NBIPUL) + 1) *
		    sizeof (long));
	}
	dp->disp_q = dptr->newdispq;
	dp->disp_qactmap = dptr->newdqactmap;
	dp->disp_q_limit = &dptr->newdispq[numpris];
	dp->disp_npri = numpris;
}

static void
disp_dq_free(struct disp_queue_info *dptr)
{
	if (dptr->olddispq != NULL)
		kmem_free(dptr->olddispq,
		    dptr->oldnglobpris * sizeof (dispq_t));
	if (dptr->olddqactmap != NULL)
		kmem_free(dptr->olddqactmap,
		    ((dptr->oldnglobpris / BT_NBIPUL) + 1) * sizeof (long));
}

/*
 * For a newly created CPU, initialize the dispatch queue.
 * This is called before the CPU is known through cpu[] or on any lists.
 */
void
disp_cpu_init(cpu_t *cp)
{
	disp_t	*dp;
	dispq_t	*newdispq;
	ulong_t	*newdqactmap;

	ASSERT(MUTEX_HELD(&cpu_lock));	/* protect dispatcher queue sizes */

	if (cp == cpu0_disp.disp_cpu)
		dp = &cpu0_disp;
	else
		dp = kmem_alloc(sizeof (disp_t), KM_SLEEP);
	bzero(dp, sizeof (disp_t));
	cp->cpu_disp = dp;
	dp->disp_cpu = cp;
	dp->disp_maxrunpri = -1;
	dp->disp_max_unbound_pri = -1;
	DISP_LOCK_INIT(&cp->cpu_thread_lock);
	/*
	 * Allocate memory for the dispatcher queue headers
	 * and the active queue bitmap.
	 */
	newdispq = kmem_zalloc(v.v_nglobpris * sizeof (dispq_t), KM_SLEEP);
	newdqactmap = kmem_zalloc(((v.v_nglobpris / BT_NBIPUL) + 1) *
	    sizeof (long), KM_SLEEP);
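	/*
	 * (The active-queue bitmap is sized at one bit per priority,
	 * rounded up to a whole number of BT_NBIPUL-bit words;
	 * disp_dq_alloc() above sizes its map the same way.)
	 */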
	dp->disp_q = newdispq;
	dp->disp_qactmap = newdqactmap;
	dp->disp_q_limit = &newdispq[v.v_nglobpris];
	dp->disp_npri = v.v_nglobpris;
}

void
disp_cpu_fini(cpu_t *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	disp_kp_free(cp->cpu_disp);
	if (cp->cpu_disp != &cpu0_disp)
		kmem_free(cp->cpu_disp, sizeof (disp_t));
}

/*
 * Allocate new, larger kpreempt dispatch queue to replace the old one.
 */
void
disp_kp_alloc(disp_t *dq, pri_t npri)
{
	struct disp_queue_info	mem_info;

	if (npri > dq->disp_npri) {
		/*
		 * Allocate memory for the new array.
		 */
		disp_dq_alloc(&mem_info, npri, dq);

		/*
		 * We need to copy the old structures to the new
		 * and free the old.
		 */
		disp_dq_assign(&mem_info, npri);
		disp_dq_free(&mem_info);
	}
}

/*
 * Free dispatch queue.
 * Used for the kpreempt queues for a removed CPU partition and
 * for the per-CPU queues of deleted CPUs.
 */
void
disp_kp_free(disp_t *dq)
{
	struct disp_queue_info	mem_info;

	mem_info.olddispq = dq->disp_q;
	mem_info.olddqactmap = dq->disp_qactmap;
	mem_info.oldnglobpris = dq->disp_npri;
	disp_dq_free(&mem_info);
}

/*
 * End dispatcher and scheduler initialization.
 */

/*
 * See if there's anything to do other than remain idle.
 * Return non-zero if there is.
 *
 * This function must be called with high spl, or with
 * kernel preemption disabled to prevent the partition's
 * active cpu list from changing while being traversed.
 *
 * This is essentially a simpler version of disp_getwork()
 * to be called by CPUs preparing to "halt".
 */
int
disp_anywork(void)
{
	cpu_t		*cp = CPU;
	cpu_t		*ocp;
	volatile int	*local_nrunnable = &cp->cpu_disp->disp_nrunnable;

	if (!(cp->cpu_flags & CPU_OFFLINE)) {
		if (CP_MAXRUNPRI(cp->cpu_part) >= 0)
			return (1);

		for (ocp = cp->cpu_next_part; ocp != cp;
		    ocp = ocp->cpu_next_part) {
			ASSERT(CPU_ACTIVE(ocp));

			/*
			 * Something has appeared on the local run queue.
			 */
			if (*local_nrunnable > 0)
				return (1);
			/*
			 * If we encounter another idle CPU that will
			 * soon be trolling around through disp_anywork()
			 * terminate our walk here and let this other CPU
			 * patrol the next part of the list.
			 */
			if (ocp->cpu_dispatch_pri == -1 &&
			    (ocp->cpu_disp_flags & CPU_DISP_HALTED) == 0)
				return (0);
			/*
			 * Work can be taken from another CPU if:
			 *	- There is unbound work on the run queue
			 *	- That work isn't a thread undergoing a
			 *	  context switch on an otherwise empty queue.
			 *	- The CPU isn't running the idle loop.
			 */
			if (ocp->cpu_disp->disp_max_unbound_pri != -1 &&
			    !((ocp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
			    ocp->cpu_disp->disp_nrunnable == 1) &&
			    ocp->cpu_dispatch_pri != -1)
				return (1);
		}
	}
	return (0);
}
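
/*
 * A minimal caller sketch (illustrative only; mach_cpu_idle_wait() is a
 * hypothetical name, not an actual interface): a platform idle routine
 * preparing to halt would check disp_anywork() with preemption disabled
 * and only halt when it returns 0:
 *
 *	kpreempt_disable();
 *	if (!disp_anywork())
 *		mach_cpu_idle_wait();	(hypothetical halt-until-wakeup)
 *	kpreempt_enable();
 */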

/*
 * Called when CPU enters the idle loop
 */
static void
idle_enter()
{
	cpu_t		*cp = CPU;

	new_cpu_mstate(CMS_IDLE, gethrtime_unscaled());
	CPU_STATS_ADDQ(cp, sys, idlethread, 1);
	set_idle_cpu(cp->cpu_id);	/* arch-dependent hook */
}

/*
 * Called when CPU exits the idle loop
 */
static void
idle_exit()
{
	cpu_t		*cp = CPU;

	new_cpu_mstate(CMS_SYSTEM, gethrtime_unscaled());
	unset_idle_cpu(cp->cpu_id);	/* arch-dependent hook */
}

/*
 * Idle loop.
 */
void
idle()
{
	struct cpu	*cp = CPU;		/* pointer to this CPU */
	kthread_t	*t;			/* taken thread */

	idle_enter();

	/*
	 * Uniprocessor version of idle loop.
	 * Do this until notified that we're on an actual multiprocessor.
	 */
	while (ncpus == 1) {
		if (cp->cpu_disp->disp_nrunnable == 0) {
			(*idle_cpu)();
			continue;
		}
		idle_exit();
		swtch();

		idle_enter(); /* returned from swtch */
	}

	/*
	 * Multiprocessor idle loop.
	 */
	for (;;) {
		/*
		 * If CPU is completely quiesced by p_online(2), just wait
		 * here with minimal bus traffic until put online.
		 */
		while (cp->cpu_flags & CPU_QUIESCED)
			(*idle_cpu)();

		if (cp->cpu_disp->disp_nrunnable != 0) {
			idle_exit();
			swtch();
		} else {
			if (cp->cpu_flags & CPU_OFFLINE)
				continue;
			if ((t = disp_getwork(cp)) == NULL) {
				if (cp->cpu_chosen_level != -1) {
					disp_t *dp = cp->cpu_disp;
					disp_t *kpq;

					disp_lock_enter(&dp->disp_lock);
					/*
					 * Set kpq under lock to prevent
					 * migration between partitions.
					 */
					kpq = &cp->cpu_part->cp_kp_queue;
					if (kpq->disp_maxrunpri == -1)
						cp->cpu_chosen_level = -1;
					disp_lock_exit(&dp->disp_lock);
				}
				(*idle_cpu)();
				continue;
			}
			/*
			 * If there was a thread but we couldn't steal
			 * it, then keep trying.
			 */
			if (t == T_DONTSTEAL)
				continue;
			idle_exit();
			swtch_to(t);
		}
		idle_enter(); /* returned from swtch/swtch_to */
	}
}


/*
 * Preempt the currently running thread in favor of the highest
 * priority thread.  The class of the current thread controls
 * where it goes on the dispatcher queues. If panicking, turn
 * preemption off.
 */
void
preempt()
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(curthread);

	if (panicstr)
		return;

	TRACE_0(TR_FAC_DISP, TR_PREEMPT_START, "preempt_start");

	thread_lock(t);

	if (t->t_state != TS_ONPROC || t->t_disp_queue != CPU->cpu_disp) {
		/*
		 * this thread has already been chosen to be run on
		 * another CPU. Clear kprunrun on this CPU since we're
		 * already headed for swtch().
		 */
		CPU->cpu_kprunrun = 0;
		thread_unlock_nopreempt(t);
		TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end");
	} else {
		if (lwp != NULL)
			lwp->lwp_ru.nivcsw++;
		CPU_STATS_ADDQ(CPU, sys, inv_swtch, 1);
		THREAD_TRANSITION(t);
		CL_PREEMPT(t);
		DTRACE_SCHED(preempt);
		thread_unlock_nopreempt(t);

		TRACE_0(TR_FAC_DISP, TR_PREEMPT_END, "preempt_end");

		swtch();	/* clears CPU->cpu_runrun via disp() */
	}
}
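
/*
 * (Descriptive note: preempt() is reached once the dispatcher has
 * flagged this CPU via cpu_runrun/cpu_kprunrun -- typically from
 * kpreempt() for kernel preemption, or from the trap/system-call
 * return path for user-level preemption.)
 */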

extern kthread_t *thread_unpin();

/*
 * disp() - find the highest priority thread for this processor to run, and
 * set it in TS_ONPROC state so that resume() can be called to run it.
 */
static kthread_t *
disp()
{
	cpu_t		*cpup;
	disp_t		*dp;
	kthread_t	*tp;
	dispq_t		*dq;
	int		maxrunword;
	pri_t		pri;
	disp_t		*kpq;

	TRACE_0(TR_FAC_DISP, TR_DISP_START, "disp_start");

	cpup = CPU;
	/*
	 * Find the highest priority loaded, runnable thread.
	 */
	dp = cpup->cpu_disp;

reschedule:
	/*
	 * If there is more important work on the global queue with a better
	 * priority than the maximum on this CPU, take it now.
	 */
	kpq = &cpup->cpu_part->cp_kp_queue;
	while ((pri = kpq->disp_maxrunpri) >= 0 &&
	    pri >= dp->disp_maxrunpri &&
	    (cpup->cpu_flags & CPU_OFFLINE) == 0 &&
	    (tp = disp_getbest(kpq)) != NULL) {
		if (disp_ratify(tp, kpq) != NULL) {
			TRACE_1(TR_FAC_DISP, TR_DISP_END,
			    "disp_end:tid %p", tp);
			return (tp);
		}
	}

	disp_lock_enter(&dp->disp_lock);
	pri = dp->disp_maxrunpri;

	/*
	 * If there is nothing to run, look at what's runnable on other queues.
	 * Choose the idle thread if the CPU is quiesced.
	 * Note that CPUs that have the CPU_OFFLINE flag set can still run
	 * interrupt threads, which will be the only threads on the CPU's own
	 * queue, but cannot run threads from other queues.
	 */
	if (pri == -1) {
		if (!(cpup->cpu_flags & CPU_OFFLINE)) {
			disp_lock_exit(&dp->disp_lock);
			if ((tp = disp_getwork(cpup)) == NULL ||
			    tp == T_DONTSTEAL) {
				tp = cpup->cpu_idle_thread;
				(void) splhigh();
				THREAD_ONPROC(tp, cpup);
				cpup->cpu_dispthread = tp;
				cpup->cpu_dispatch_pri = -1;
				cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
				cpup->cpu_chosen_level = -1;
			}
		} else {
			disp_lock_exit_high(&dp->disp_lock);
			tp = cpup->cpu_idle_thread;
			THREAD_ONPROC(tp, cpup);
			cpup->cpu_dispthread = tp;
			cpup->cpu_dispatch_pri = -1;
			cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
			cpup->cpu_chosen_level = -1;
		}
		TRACE_1(TR_FAC_DISP, TR_DISP_END,
		    "disp_end:tid %p", tp);
		return (tp);
	}

	dq = &dp->disp_q[pri];
	tp = dq->dq_first;

	ASSERT(tp != NULL);
	ASSERT(tp->t_schedflag & TS_LOAD);	/* thread must be swapped in */

	DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp);

	/*
	 * Found it so remove it from queue.
	 */
	dp->disp_nrunnable--;
	dq->dq_sruncnt--;
	if ((dq->dq_first = tp->t_link) == NULL) {
		ulong_t	*dqactmap = dp->disp_qactmap;

		ASSERT(dq->dq_sruncnt == 0);
		dq->dq_last = NULL;

		/*
		 * The queue is empty, so the corresponding bit needs to be
		 * turned off in dqactmap.  If nrunnable != 0, we just took
		 * the last runnable thread off the highest queue, so
		 * recompute disp_maxrunpri.
		 */
		maxrunword = pri >> BT_ULSHIFT;
		dqactmap[maxrunword] &= ~BT_BIW(pri);

		if (dp->disp_nrunnable == 0) {
			dp->disp_max_unbound_pri = -1;
			dp->disp_maxrunpri = -1;
		} else {
			int ipri;

			ipri = bt_gethighbit(dqactmap, maxrunword);
			dp->disp_maxrunpri = ipri;
			if (ipri < dp->disp_max_unbound_pri)
				dp->disp_max_unbound_pri = ipri;
		}
	} else {
		tp->t_link = NULL;
	}

	/*
	 * Set TS_DONT_SWAP flag to prevent another processor from swapping
	 * out this thread before we have a chance to run it.
	 * While running, it is protected against swapping by t_lock.
	 */
	tp->t_schedflag |= TS_DONT_SWAP;
	cpup->cpu_dispthread = tp;		/* protected by spl only */
	cpup->cpu_dispatch_pri = pri;
	ASSERT(pri == DISP_PRIO(tp));
	thread_onproc(tp, cpup);		/* set t_state to TS_ONPROC */
	disp_lock_exit_high(&dp->disp_lock);	/* drop run queue lock */

	ASSERT(tp != NULL);
	TRACE_1(TR_FAC_DISP, TR_DISP_END,
	    "disp_end:tid %p", tp);

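	/*
	 * (Descriptive note: disp_ratify() re-checks the kp queue after
	 * the selection above; if a better candidate appeared in the
	 * meantime, it re-queues tp and returns NULL, and we pick again.)
	 */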
	if (disp_ratify(tp, kpq) == NULL)
		goto reschedule;

	return (tp);
}

/*
 * swtch()
 *	Find best runnable thread and run it.
 *	Called with the current thread already switched to a new state,
 *	on a sleep queue, run queue, stopped, and not zombied.
 *	May be called at any spl level less than or equal to LOCK_LEVEL.
 *	Always drops spl to the base level (spl0()).
 */
void
swtch()
{
	kthread_t	*t = curthread;
	kthread_t	*next;
	cpu_t		*cp;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	if (t->t_flag & T_INTR_THREAD)
		cpu_intr_swtch_enter(t);

	if (t->t_intr != NULL) {
		/*
		 * We are an interrupt thread.  Set up and return
		 * the interrupted thread to be resumed.
		 */
		(void) splhigh();	/* block other scheduler action */
		cp = CPU;		/* now protected against migration */
		ASSERT(CPU_ON_INTR(cp) == 0);	/* not called with PIL > 10 */
		CPU_STATS_ADDQ(cp, sys, pswitch, 1);
		CPU_STATS_ADDQ(cp, sys, intrblk, 1);
		next = thread_unpin();
		TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");
		resume_from_intr(next);
	} else {
#ifdef	DEBUG
		if (t->t_state == TS_ONPROC &&
		    t->t_disp_queue->disp_cpu == CPU &&
		    t->t_preempt == 0) {
			thread_lock(t);
			ASSERT(t->t_state != TS_ONPROC ||
			    t->t_disp_queue->disp_cpu != CPU ||
			    t->t_preempt != 0);	/* cannot migrate */
			thread_unlock_nopreempt(t);
		}
#endif	/* DEBUG */
		cp = CPU;
		next = disp();		/* returns with spl high */
		ASSERT(CPU_ON_INTR(cp) == 0);	/* not called with PIL > 10 */

		/* OK to steal anything left on run queue */
		cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;

		if (next != t) {
			hrtime_t now;

			now = gethrtime_unscaled();
			pg_ev_thread_swtch(cp, now, t, next);

			/*
			 * If t was previously in the TS_ONPROC state,
			 * setfrontdq and setbackdq won't have set its t_waitrq.
			 * Since we now finally know that we're switching away
			 * from this thread, set its t_waitrq if it is on a run
			 * queue.
			 */
			if ((t->t_state == TS_RUN) && (t->t_waitrq == 0)) {
				t->t_waitrq = now;
			}

			/*
			 * restore mstate of thread that we are switching to
			 */
			restore_mstate(next);

			CPU_STATS_ADDQ(cp, sys, pswitch, 1);
			cp->cpu_last_swtch = t->t_disp_time = ddi_get_lbolt();
			TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

			if (dtrace_vtime_active)
				dtrace_vtime_switch(next);

			resume(next);
			/*
			 * The TR_RESUME_END and TR_SWTCH_END trace points
			 * appear at the end of resume(), because we may not
			 * return here
			 */
		} else {
			if (t->t_flag & T_INTR_THREAD)
				cpu_intr_swtch_exit(t);
			/*
			 * Threads that enqueue themselves on a run queue defer
			 * setting t_waitrq. It is then either set in swtch()
			 * when the CPU is actually yielded, or not at all if it
			 * is remaining on the CPU.
			 * There is however a window between where the thread
			 * placed itself on a run queue, and where it selects
			 * itself in disp(), where a third party (eg. clock()
			 * doing tick processing) may have re-enqueued this
			 * thread, setting t_waitrq in the process. We detect
			 * this race by noticing that despite switching to
			 * ourself, our t_waitrq has been set, and should be
			 * cleared.
			 */
			if (t->t_waitrq != 0)
				t->t_waitrq = 0;

			pg_ev_thread_remain(cp, t);

			DTRACE_SCHED(remain__cpu);
			TRACE_0(TR_FAC_DISP, TR_SWTCH_END, "swtch_end");
			(void) spl0();
		}
	}
}

/*
 * swtch_from_zombie()
 *	Special case of swtch(), which allows checks for TS_ZOMB to be
 *	eliminated from normal resume.
 *	Find best runnable thread and run it.
 *	Called with the current thread zombied.
 *	Zombies cannot migrate, so CPU references are safe.
 */
void
swtch_from_zombie()
{
	kthread_t	*next;
	cpu_t		*cpu = CPU;

	TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");

	ASSERT(curthread->t_state == TS_ZOMB);

	next = disp();			/* returns with spl high */
	ASSERT(CPU_ON_INTR(CPU) == 0);	/* not called with PIL > 10 */
	CPU_STATS_ADDQ(CPU, sys, pswitch, 1);
	ASSERT(next != curthread);
	TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");

	pg_ev_thread_swtch(cpu, gethrtime_unscaled(), curthread, next);

	restore_mstate(next);

	if (dtrace_vtime_active)
		dtrace_vtime_switch(next);

	resume_from_zombie(next);
	/*
	 * The TR_RESUME_END and TR_SWTCH_END trace points
	 * appear at the end of resume(), because we certainly will not
	 * return here
	 */
}

#if defined(DEBUG) && (defined(DISP_DEBUG) || defined(lint))

/*
 * search_disp_queues()
 *	Search the given dispatch queues for thread tp.
 *	Return 1 if tp is found, otherwise return 0.
 */
static int
search_disp_queues(disp_t *dp, kthread_t *tp)
{
	dispq_t	*dq;
	dispq_t	*eq;

	disp_lock_enter_high(&dp->disp_lock);

	for (dq = dp->disp_q, eq = dp->disp_q_limit; dq < eq; ++dq) {
		kthread_t	*rp;

		ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL);

		for (rp = dq->dq_first; rp; rp = rp->t_link)
			if (tp == rp) {
				disp_lock_exit_high(&dp->disp_lock);
				return (1);
			}
	}
	disp_lock_exit_high(&dp->disp_lock);

	return (0);
}

/*
 * thread_on_queue()
 *	Search all per-CPU dispatch queues and all partition-wide kpreempt
 *	queues for thread tp. Return 1 if tp is found, otherwise return 0.
 */
static int
thread_on_queue(kthread_t *tp)
{
	cpu_t		*cp;
	struct cpupart	*part;

	ASSERT(getpil() >= DISP_LEVEL);

	/*
	 * Search the per-CPU dispatch queues for tp.
	 */
	cp = CPU;
	do {
		if (search_disp_queues(cp->cpu_disp, tp))
			return (1);
	} while ((cp = cp->cpu_next_onln) != CPU);

	/*
	 * Search the partition-wide kpreempt queues for tp.
	 */
	part = CPU->cpu_part;
	do {
		if (search_disp_queues(&part->cp_kp_queue, tp))
			return (1);
	} while ((part = part->cp_next) != CPU->cpu_part);

	return (0);
}

#else

#define	thread_on_queue(tp)	0	/* ASSERT must be !thread_on_queue */

#endif  /* DEBUG */
10857c478bd9Sstevel@tonic-gate
10867c478bd9Sstevel@tonic-gate /*
10877c478bd9Sstevel@tonic-gate * Like swtch(), but switches to a specified thread taken from another CPU.
10887c478bd9Sstevel@tonic-gate * Called with spl high.
10897c478bd9Sstevel@tonic-gate */
10907c478bd9Sstevel@tonic-gate void
10917c478bd9Sstevel@tonic-gate swtch_to(kthread_t *next)
10927c478bd9Sstevel@tonic-gate {
10937c478bd9Sstevel@tonic-gate cpu_t *cp = CPU;
10940e751525SEric Saxe hrtime_t now;
10957c478bd9Sstevel@tonic-gate
10967c478bd9Sstevel@tonic-gate TRACE_0(TR_FAC_DISP, TR_SWTCH_START, "swtch_start");
10977c478bd9Sstevel@tonic-gate
10987c478bd9Sstevel@tonic-gate /*
10997c478bd9Sstevel@tonic-gate * Update context switch statistics.
11007c478bd9Sstevel@tonic-gate */
11017c478bd9Sstevel@tonic-gate CPU_STATS_ADDQ(cp, sys, pswitch, 1);
11027c478bd9Sstevel@tonic-gate
11037c478bd9Sstevel@tonic-gate TRACE_0(TR_FAC_DISP, TR_RESUME_START, "resume_start");
11047c478bd9Sstevel@tonic-gate
11050e751525SEric Saxe now = gethrtime_unscaled();
11060e751525SEric Saxe pg_ev_thread_swtch(cp, now, curthread, next);
11077c478bd9Sstevel@tonic-gate
11087c478bd9Sstevel@tonic-gate /* OK to steal anything left on run queue */
11097c478bd9Sstevel@tonic-gate cp->cpu_disp_flags &= ~CPU_DISP_DONTSTEAL;
11107c478bd9Sstevel@tonic-gate
11117c478bd9Sstevel@tonic-gate /* record last execution time */
1112d3d50737SRafael Vanoni cp->cpu_last_swtch = curthread->t_disp_time = ddi_get_lbolt();
11137c478bd9Sstevel@tonic-gate
1114f2bd4627Sjohansen /*
1115f2bd4627Sjohansen * If t was previously in the TS_ONPROC state, setfrontdq and setbackdq
1116f2bd4627Sjohansen * won't have set its t_waitrq. Since we now finally know that we're
1117f2bd4627Sjohansen * switching away from this thread, set its t_waitrq if it is on a run
1118f2bd4627Sjohansen * queue.
1119f2bd4627Sjohansen */
1120f2bd4627Sjohansen if ((curthread->t_state == TS_RUN) && (curthread->t_waitrq == 0)) {
11210e751525SEric Saxe curthread->t_waitrq = now;
1122f2bd4627Sjohansen }
1123f2bd4627Sjohansen
1124f2bd4627Sjohansen /* restore next thread to previously running microstate */
1125f2bd4627Sjohansen restore_mstate(next);
1126f2bd4627Sjohansen
11277c478bd9Sstevel@tonic-gate if (dtrace_vtime_active)
11287c478bd9Sstevel@tonic-gate dtrace_vtime_switch(next);
11297c478bd9Sstevel@tonic-gate
11307c478bd9Sstevel@tonic-gate resume(next);
11317c478bd9Sstevel@tonic-gate /*
11327c478bd9Sstevel@tonic-gate * The TR_RESUME_END and TR_SWTCH_END trace points
11337c478bd9Sstevel@tonic-gate * appear at the end of resume(), because we may not
11347c478bd9Sstevel@tonic-gate * return here
11357c478bd9Sstevel@tonic-gate */
11367c478bd9Sstevel@tonic-gate }
11377c478bd9Sstevel@tonic-gate
11387c478bd9Sstevel@tonic-gate static void
11397c478bd9Sstevel@tonic-gate cpu_resched(cpu_t *cp, pri_t tpri)
11407c478bd9Sstevel@tonic-gate {
11417c478bd9Sstevel@tonic-gate int call_poke_cpu = 0;
11427c478bd9Sstevel@tonic-gate pri_t cpupri = cp->cpu_dispatch_pri;
11437c478bd9Sstevel@tonic-gate
1144455e370cSJohn Levon if (cpupri != CPU_IDLE_PRI && cpupri < tpri) {
11457c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_CPU_RESCHED,
11467c478bd9Sstevel@tonic-gate "CPU_RESCHED:Tpri %d Cpupri %d", tpri, cpupri);
11477c478bd9Sstevel@tonic-gate if (tpri >= upreemptpri && cp->cpu_runrun == 0) {
11487c478bd9Sstevel@tonic-gate cp->cpu_runrun = 1;
11497c478bd9Sstevel@tonic-gate aston(cp->cpu_dispthread);
11507c478bd9Sstevel@tonic-gate if (tpri < kpreemptpri && cp != CPU)
11517c478bd9Sstevel@tonic-gate call_poke_cpu = 1;
11527c478bd9Sstevel@tonic-gate }
11537c478bd9Sstevel@tonic-gate if (tpri >= kpreemptpri && cp->cpu_kprunrun == 0) {
11547c478bd9Sstevel@tonic-gate cp->cpu_kprunrun = 1;
11557c478bd9Sstevel@tonic-gate if (cp != CPU)
11567c478bd9Sstevel@tonic-gate call_poke_cpu = 1;
11577c478bd9Sstevel@tonic-gate }
11587c478bd9Sstevel@tonic-gate }
11597c478bd9Sstevel@tonic-gate
11607c478bd9Sstevel@tonic-gate /*
11617c478bd9Sstevel@tonic-gate * Propagate cpu_runrun, and cpu_kprunrun to global visibility.
11627c478bd9Sstevel@tonic-gate */
11637c478bd9Sstevel@tonic-gate membar_enter();
11647c478bd9Sstevel@tonic-gate
11657c478bd9Sstevel@tonic-gate if (call_poke_cpu)
11667c478bd9Sstevel@tonic-gate poke_cpu(cp->cpu_id);
11677c478bd9Sstevel@tonic-gate }
11687c478bd9Sstevel@tonic-gate
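/*
 * For illustration, the decision above reduces to the following,
 * assuming cp is running something at a lower, non-idle priority:
 *
 *	upreemptpri <= tpri < kpreemptpri:
 *		set cpu_runrun and aston() the dispatched thread; poke
 *		cp only if it is remote (user-level preemption).
 *	tpri >= kpreemptpri:
 *		additionally set cpu_kprunrun and poke a remote cp
 *		(kernel preemption).
 *
 * The membar_enter() publishes both flags before any poke_cpu(), so
 * the poked CPU cannot observe the poke without also observing them.
 */
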
11697c478bd9Sstevel@tonic-gate /*
11707c478bd9Sstevel@tonic-gate * setbackdq() keeps runqs balanced such that the difference in length
11717c478bd9Sstevel@tonic-gate * between the chosen runq and the next one is no more than RUNQ_MAX_DIFF.
11727c478bd9Sstevel@tonic-gate * For threads with priorities below RUNQ_MATCH_PRI, the runq lengths must
11737c478bd9Sstevel@tonic-gate * match exactly. When the per-thread TS_RUNQMATCH flag is set, setbackdq() will
11747c478bd9Sstevel@tonic-gate * try to keep runqs perfectly balanced regardless of the thread priority.
11757c478bd9Sstevel@tonic-gate */
11767c478bd9Sstevel@tonic-gate #define RUNQ_MATCH_PRI 16 /* pri below which queue lengths must match */
11777c478bd9Sstevel@tonic-gate #define RUNQ_MAX_DIFF 2 /* maximum runq length difference */
11787c478bd9Sstevel@tonic-gate #define RUNQ_LEN(cp, pri) ((cp)->cpu_disp->disp_q[pri].dq_sruncnt)
11797c478bd9Sstevel@tonic-gate
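/*
 * Worked example of the balance rule applied in setbackdq() below
 * (queue lengths assumed for illustration): with RUNQ_MAX_DIFF == 2,
 * a thread at tpri == 30 whose chosen CPU has RUNQ_LEN(cp, 30) == 3
 * computes qlen = 3 - 2 = 1 and migrates only if a neighbouring CPU
 * has RUNQ_LEN(newcp, 30) < 1, i.e. an empty queue at that priority;
 * a thread at tpri == 10 (below RUNQ_MATCH_PRI) keeps qlen == 3 and
 * migrates whenever the neighbour's queue is any shorter.
 */
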
11807c478bd9Sstevel@tonic-gate /*
11816890d023SEric Saxe * Macro that evaluates to true if it is likely that the thread has cache
11826890d023SEric Saxe * warmth. This is based on the amount of time that has elapsed since the
11836890d023SEric Saxe * thread last ran. If that amount of time is less than "rechoose_interval"
11846890d023SEric Saxe * ticks, then we decide that the thread has enough cache warmth to warrant
11856890d023SEric Saxe * some affinity for t->t_cpu.
11866890d023SEric Saxe */
11876890d023SEric Saxe #define THREAD_HAS_CACHE_WARMTH(thread) \
11886890d023SEric Saxe ((thread == curthread) || \
1189d3d50737SRafael Vanoni ((ddi_get_lbolt() - thread->t_disp_time) <= rechoose_interval))
11906890d023SEric Saxe /*
11917c478bd9Sstevel@tonic-gate * Put the specified thread on the back of the dispatcher
11927c478bd9Sstevel@tonic-gate * queue corresponding to its current priority.
11937c478bd9Sstevel@tonic-gate *
11947c478bd9Sstevel@tonic-gate * Called with the thread in transition, onproc or stopped state
11957c478bd9Sstevel@tonic-gate * and locked (transition implies locked) and at high spl.
11967c478bd9Sstevel@tonic-gate * Returns with the thread in TS_RUN state and still locked.
11977c478bd9Sstevel@tonic-gate */
11987c478bd9Sstevel@tonic-gate void
11997c478bd9Sstevel@tonic-gate setbackdq(kthread_t *tp)
12007c478bd9Sstevel@tonic-gate {
12017c478bd9Sstevel@tonic-gate dispq_t *dq;
12027c478bd9Sstevel@tonic-gate disp_t *dp;
12037c478bd9Sstevel@tonic-gate cpu_t *cp;
12047c478bd9Sstevel@tonic-gate pri_t tpri;
12057c478bd9Sstevel@tonic-gate int bound;
12066890d023SEric Saxe boolean_t self;
12077c478bd9Sstevel@tonic-gate
12087c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
12097c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
12107c478bd9Sstevel@tonic-gate ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */
12117c478bd9Sstevel@tonic-gate
12127c478bd9Sstevel@tonic-gate /*
12137c478bd9Sstevel@tonic-gate * If the thread is "swapped" or on the swap queue, don't
12147c478bd9Sstevel@tonic-gate * queue it, but wake sched.
12157c478bd9Sstevel@tonic-gate */
12167c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) {
12177c478bd9Sstevel@tonic-gate disp_swapped_setrun(tp);
12187c478bd9Sstevel@tonic-gate return;
12197c478bd9Sstevel@tonic-gate }
12207c478bd9Sstevel@tonic-gate
12216890d023SEric Saxe self = (tp == curthread);
12226890d023SEric Saxe
1223abd41583Sgd209917 if (tp->t_bound_cpu || tp->t_weakbound_cpu)
1224abd41583Sgd209917 bound = 1;
1225abd41583Sgd209917 else
1226abd41583Sgd209917 bound = 0;
1227abd41583Sgd209917
12287c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp);
12297c478bd9Sstevel@tonic-gate if (ncpus == 1)
12307c478bd9Sstevel@tonic-gate cp = tp->t_cpu;
1231abd41583Sgd209917 else if (!bound) {
12327c478bd9Sstevel@tonic-gate if (tpri >= kpqpri) {
12337c478bd9Sstevel@tonic-gate setkpdq(tp, SETKP_BACK);
12347c478bd9Sstevel@tonic-gate return;
12357c478bd9Sstevel@tonic-gate }
12366890d023SEric Saxe
12377c478bd9Sstevel@tonic-gate /*
12386890d023SEric Saxe * We'll generally let this thread continue to run where
12396890d023SEric Saxe * it last ran...but will consider migration if:
1240455e370cSJohn Levon * - The thread probably doesn't have much cache warmth.
1241c3377ee9SJohn Levon * - SMT exclusion would prefer us to run elsewhere.
12426890d023SEric Saxe * - The CPU where it last ran is the target of an offline
12436890d023SEric Saxe * request.
1244455e370cSJohn Levon * - The thread last ran outside its home lgroup.
12457c478bd9Sstevel@tonic-gate */
12466890d023SEric Saxe if ((!THREAD_HAS_CACHE_WARMTH(tp)) ||
1247c3377ee9SJohn Levon !smt_should_run(tp, tp->t_cpu) ||
1248455e370cSJohn Levon (tp->t_cpu == cpu_inmotion) ||
1249455e370cSJohn Levon !LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, tp->t_cpu)) {
1250455e370cSJohn Levon cp = disp_lowpri_cpu(tp->t_cpu, tp, tpri);
12516890d023SEric Saxe } else {
12526890d023SEric Saxe cp = tp->t_cpu;
12536890d023SEric Saxe }
12547c478bd9Sstevel@tonic-gate
12557c478bd9Sstevel@tonic-gate if (tp->t_cpupart == cp->cpu_part) {
12567c478bd9Sstevel@tonic-gate int qlen;
12577c478bd9Sstevel@tonic-gate
12587c478bd9Sstevel@tonic-gate /*
1259fb2f18f8Sesaxe * Perform any CMT load balancing
12607c478bd9Sstevel@tonic-gate */
1261fb2f18f8Sesaxe cp = cmt_balance(tp, cp);
12627c478bd9Sstevel@tonic-gate
12637c478bd9Sstevel@tonic-gate /*
12647c478bd9Sstevel@tonic-gate * Balance across the run queues
12657c478bd9Sstevel@tonic-gate */
12667c478bd9Sstevel@tonic-gate qlen = RUNQ_LEN(cp, tpri);
12677c478bd9Sstevel@tonic-gate if (tpri >= RUNQ_MATCH_PRI &&
12687c478bd9Sstevel@tonic-gate !(tp->t_schedflag & TS_RUNQMATCH))
12697c478bd9Sstevel@tonic-gate qlen -= RUNQ_MAX_DIFF;
12707c478bd9Sstevel@tonic-gate if (qlen > 0) {
1271685679f7Sakolb cpu_t *newcp;
12727c478bd9Sstevel@tonic-gate
1273685679f7Sakolb if (tp->t_lpl->lpl_lgrpid == LGRP_ROOTID) {
1274685679f7Sakolb newcp = cp->cpu_next_part;
1275685679f7Sakolb } else if ((newcp = cp->cpu_next_lpl) == cp) {
1276685679f7Sakolb newcp = cp->cpu_next_part;
12777c478bd9Sstevel@tonic-gate }
1278685679f7Sakolb
1279c3377ee9SJohn Levon if (smt_should_run(tp, newcp) &&
1280455e370cSJohn Levon RUNQ_LEN(newcp, tpri) < qlen) {
1281685679f7Sakolb DTRACE_PROBE3(runq__balance,
1282685679f7Sakolb kthread_t *, tp,
1283685679f7Sakolb cpu_t *, cp, cpu_t *, newcp);
1284685679f7Sakolb cp = newcp;
1285685679f7Sakolb }
12867c478bd9Sstevel@tonic-gate }
12877c478bd9Sstevel@tonic-gate } else {
12887c478bd9Sstevel@tonic-gate /*
12897c478bd9Sstevel@tonic-gate * Migrate to a cpu in the new partition.
12907c478bd9Sstevel@tonic-gate */
1291455e370cSJohn Levon cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist, tp,
1292455e370cSJohn Levon tp->t_pri);
12937c478bd9Sstevel@tonic-gate }
12947c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0);
12957c478bd9Sstevel@tonic-gate } else {
12967c478bd9Sstevel@tonic-gate /*
12977c478bd9Sstevel@tonic-gate * It is possible that t_weakbound_cpu != t_bound_cpu (for
12987c478bd9Sstevel@tonic-gate * a short time until weak binding that existed when the
12997c478bd9Sstevel@tonic-gate * strong binding was established has dropped) so we must
13007c478bd9Sstevel@tonic-gate * favour weak binding over strong.
13017c478bd9Sstevel@tonic-gate */
13027c478bd9Sstevel@tonic-gate cp = tp->t_weakbound_cpu ?
13037c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu : tp->t_bound_cpu;
13047c478bd9Sstevel@tonic-gate }
1305f2bd4627Sjohansen /*
1306f2bd4627Sjohansen * A thread that is ONPROC may be temporarily placed on the run queue
1307f2bd4627Sjohansen * but then chosen to run again by disp. If the thread we're placing on
1308f2bd4627Sjohansen * the queue is in TS_ONPROC state, don't set its t_waitrq until a
1309f2bd4627Sjohansen * replacement process is actually scheduled in swtch(). In this
1310f2bd4627Sjohansen * situation, curthread is the only thread that could be in the ONPROC
1311f2bd4627Sjohansen * state.
1312f2bd4627Sjohansen */
13136890d023SEric Saxe if ((!self) && (tp->t_waitrq == 0)) {
1314f2bd4627Sjohansen hrtime_t curtime;
1315f2bd4627Sjohansen
1316f2bd4627Sjohansen curtime = gethrtime_unscaled();
1317f2bd4627Sjohansen (void) cpu_update_pct(tp, curtime);
1318f2bd4627Sjohansen tp->t_waitrq = curtime;
1319f2bd4627Sjohansen } else {
1320f2bd4627Sjohansen (void) cpu_update_pct(tp, gethrtime_unscaled());
1321f2bd4627Sjohansen }
1322f2bd4627Sjohansen
13237c478bd9Sstevel@tonic-gate dp = cp->cpu_disp;
13247c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock);
13257c478bd9Sstevel@tonic-gate
13267c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 0);
13277c478bd9Sstevel@tonic-gate TRACE_3(TR_FAC_DISP, TR_BACKQ, "setbackdq:pri %d cpu %p tid %p",
13287c478bd9Sstevel@tonic-gate tpri, cp, tp);
13297c478bd9Sstevel@tonic-gate
13307c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri);
13317c478bd9Sstevel@tonic-gate
13327c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */
13337c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp;
13347c478bd9Sstevel@tonic-gate tp->t_link = NULL;
13357c478bd9Sstevel@tonic-gate
13367c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri];
13377c478bd9Sstevel@tonic-gate dp->disp_nrunnable++;
1338685679f7Sakolb if (!bound)
1339685679f7Sakolb dp->disp_steal = 0;
13407c478bd9Sstevel@tonic-gate membar_enter();
13417c478bd9Sstevel@tonic-gate
13427c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) {
13437c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first != NULL);
13447c478bd9Sstevel@tonic-gate dq->dq_last->t_link = tp;
13457c478bd9Sstevel@tonic-gate dq->dq_last = tp;
13467c478bd9Sstevel@tonic-gate } else {
13477c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL);
13487c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL);
13497c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp;
13507c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri);
13517c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) {
13527c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri;
13537c478bd9Sstevel@tonic-gate membar_enter();
13547c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri);
13557c478bd9Sstevel@tonic-gate }
13567c478bd9Sstevel@tonic-gate }
13577c478bd9Sstevel@tonic-gate
13587c478bd9Sstevel@tonic-gate if (!bound && tpri > dp->disp_max_unbound_pri) {
13596890d023SEric Saxe if (self && dp->disp_max_unbound_pri == -1 && cp == CPU) {
13607c478bd9Sstevel@tonic-gate /*
13617c478bd9Sstevel@tonic-gate * If there are no other unbound threads on the
13627c478bd9Sstevel@tonic-gate * run queue, don't allow other CPUs to steal
13637c478bd9Sstevel@tonic-gate * this thread while we are in the middle of a
13647c478bd9Sstevel@tonic-gate * context switch. We may just switch to it
13657c478bd9Sstevel@tonic-gate * again right away. CPU_DISP_DONTSTEAL is cleared
13667c478bd9Sstevel@tonic-gate * in swtch and swtch_to.
13677c478bd9Sstevel@tonic-gate */
13687c478bd9Sstevel@tonic-gate cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL;
13697c478bd9Sstevel@tonic-gate }
13707c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri;
13717c478bd9Sstevel@tonic-gate }
13727c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, bound);
13737c478bd9Sstevel@tonic-gate }
13747c478bd9Sstevel@tonic-gate
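/*
 * A minimal sketch of the calling convention (hypothetical caller; the
 * real ones are setrun_locked() and the scheduling classes):
 *
 *	thread_lock(tp);
 *	THREAD_TRANSITION(tp);		take tp out of its old state
 *	tp->t_pri = newpri;		the class picks the priority
 *	setbackdq(tp);			tp is now TS_RUN on some queue
 *	thread_unlock(tp);		releases the disp queue lock
 *
 * setbackdq() chooses the CPU itself; the caller guarantees only that
 * tp is locked, off every run queue, and in an acceptable state.
 */
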
13757c478bd9Sstevel@tonic-gate /*
13767c478bd9Sstevel@tonic-gate * Put the specified thread on the front of the dispatcher
13777c478bd9Sstevel@tonic-gate * queue corresponding to its current priority.
13787c478bd9Sstevel@tonic-gate *
13797c478bd9Sstevel@tonic-gate * Called with the thread in transition, onproc or stopped state
13807c478bd9Sstevel@tonic-gate * and locked (transition implies locked) and at high spl.
13817c478bd9Sstevel@tonic-gate * Returns with the thread in TS_RUN state and still locked.
13827c478bd9Sstevel@tonic-gate */
13837c478bd9Sstevel@tonic-gate void
13847c478bd9Sstevel@tonic-gate setfrontdq(kthread_t *tp)
13857c478bd9Sstevel@tonic-gate {
13867c478bd9Sstevel@tonic-gate disp_t *dp;
13877c478bd9Sstevel@tonic-gate dispq_t *dq;
13887c478bd9Sstevel@tonic-gate cpu_t *cp;
13897c478bd9Sstevel@tonic-gate pri_t tpri;
13907c478bd9Sstevel@tonic-gate int bound;
13917c478bd9Sstevel@tonic-gate
13927c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
13937c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
13947c478bd9Sstevel@tonic-gate ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */
13957c478bd9Sstevel@tonic-gate
13967c478bd9Sstevel@tonic-gate /*
13977c478bd9Sstevel@tonic-gate * If the thread is "swapped" or on the swap queue, don't
13987c478bd9Sstevel@tonic-gate * queue it, but wake sched.
13997c478bd9Sstevel@tonic-gate */
14007c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) {
14017c478bd9Sstevel@tonic-gate disp_swapped_setrun(tp);
14027c478bd9Sstevel@tonic-gate return;
14037c478bd9Sstevel@tonic-gate }
14047c478bd9Sstevel@tonic-gate
1405abd41583Sgd209917 if (tp->t_bound_cpu || tp->t_weakbound_cpu)
1406abd41583Sgd209917 bound = 1;
1407abd41583Sgd209917 else
1408abd41583Sgd209917 bound = 0;
1409abd41583Sgd209917
14107c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp);
14117c478bd9Sstevel@tonic-gate if (ncpus == 1)
14127c478bd9Sstevel@tonic-gate cp = tp->t_cpu;
1413abd41583Sgd209917 else if (!bound) {
14147c478bd9Sstevel@tonic-gate if (tpri >= kpqpri) {
14157c478bd9Sstevel@tonic-gate setkpdq(tp, SETKP_FRONT);
14167c478bd9Sstevel@tonic-gate return;
14177c478bd9Sstevel@tonic-gate }
14187c478bd9Sstevel@tonic-gate cp = tp->t_cpu;
14197c478bd9Sstevel@tonic-gate if (tp->t_cpupart == cp->cpu_part) {
14207c478bd9Sstevel@tonic-gate /*
14216890d023SEric Saxe * We'll generally let this thread continue to run
14226890d023SEric Saxe * where it last ran, but will consider migration if:
1423455e370cSJohn Levon * - The thread last ran outside its home lgroup.
14246890d023SEric Saxe * - The CPU where it last ran is the target of an
14256890d023SEric Saxe * offline request (a thread_nomigrate() on the in
14266890d023SEric Saxe * motion CPU relies on this when forcing a preempt).
14276890d023SEric Saxe * - The thread isn't the highest priority thread where
14286890d023SEric Saxe * it last ran, and it is considered not likely to
14296890d023SEric Saxe * have significant cache warmth.
14307c478bd9Sstevel@tonic-gate */
1431455e370cSJohn Levon if (!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, cp) ||
1432455e370cSJohn Levon cp == cpu_inmotion ||
1433455e370cSJohn Levon (tpri < cp->cpu_disp->disp_maxrunpri &&
1434455e370cSJohn Levon !THREAD_HAS_CACHE_WARMTH(tp))) {
1435455e370cSJohn Levon cp = disp_lowpri_cpu(tp->t_cpu, tp, tpri);
14366890d023SEric Saxe }
14377c478bd9Sstevel@tonic-gate } else {
14387c478bd9Sstevel@tonic-gate /*
14397c478bd9Sstevel@tonic-gate * Migrate to a cpu in the new partition.
14407c478bd9Sstevel@tonic-gate */
14417c478bd9Sstevel@tonic-gate cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist,
1442455e370cSJohn Levon tp, tp->t_pri);
14437c478bd9Sstevel@tonic-gate }
14447c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0);
14457c478bd9Sstevel@tonic-gate } else {
14467c478bd9Sstevel@tonic-gate /*
14477c478bd9Sstevel@tonic-gate * It is possible that t_weakbound_cpu != t_bound_cpu (for
14487c478bd9Sstevel@tonic-gate * a short time until weak binding that existed when the
14497c478bd9Sstevel@tonic-gate * strong binding was established has dropped) so we must
14507c478bd9Sstevel@tonic-gate * favour weak binding over strong.
14517c478bd9Sstevel@tonic-gate */
14527c478bd9Sstevel@tonic-gate cp = tp->t_weakbound_cpu ?
14537c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu : tp->t_bound_cpu;
14547c478bd9Sstevel@tonic-gate }
1455f2bd4627Sjohansen
1456f2bd4627Sjohansen /*
1457f2bd4627Sjohansen * A thread that is ONPROC may be temporarily placed on the run queue
1458f2bd4627Sjohansen * but then chosen to run again by disp. If the thread we're placing on
1459f2bd4627Sjohansen * the queue is in TS_ONPROC state, don't set its t_waitrq until a
1460f2bd4627Sjohansen * replacement process is actually scheduled in swtch(). In this
1461f2bd4627Sjohansen * situation, curthread is the only thread that could be in the ONPROC
1462f2bd4627Sjohansen * state.
1463f2bd4627Sjohansen */
1464f2bd4627Sjohansen if ((tp != curthread) && (tp->t_waitrq == 0)) {
1465f2bd4627Sjohansen hrtime_t curtime;
1466f2bd4627Sjohansen
1467f2bd4627Sjohansen curtime = gethrtime_unscaled();
1468f2bd4627Sjohansen (void) cpu_update_pct(tp, curtime);
1469f2bd4627Sjohansen tp->t_waitrq = curtime;
1470f2bd4627Sjohansen } else {
1471f2bd4627Sjohansen (void) cpu_update_pct(tp, gethrtime_unscaled());
1472f2bd4627Sjohansen }
1473f2bd4627Sjohansen
14747c478bd9Sstevel@tonic-gate dp = cp->cpu_disp;
14757c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock);
14767c478bd9Sstevel@tonic-gate
14777c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp);
14787c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 1);
14797c478bd9Sstevel@tonic-gate
14807c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri);
14817c478bd9Sstevel@tonic-gate
14827c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set TS_RUN state and lock */
14837c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp;
14847c478bd9Sstevel@tonic-gate
14857c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri];
14867c478bd9Sstevel@tonic-gate dp->disp_nrunnable++;
1487685679f7Sakolb if (!bound)
1488685679f7Sakolb dp->disp_steal = 0;
14897c478bd9Sstevel@tonic-gate membar_enter();
14907c478bd9Sstevel@tonic-gate
14917c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) {
14927c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last != NULL);
14937c478bd9Sstevel@tonic-gate tp->t_link = dq->dq_first;
14947c478bd9Sstevel@tonic-gate dq->dq_first = tp;
14957c478bd9Sstevel@tonic-gate } else {
14967c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL);
14977c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL);
14987c478bd9Sstevel@tonic-gate tp->t_link = NULL;
14997c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp;
15007c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri);
15017c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) {
15027c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri;
15037c478bd9Sstevel@tonic-gate membar_enter();
15047c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri);
15057c478bd9Sstevel@tonic-gate }
15067c478bd9Sstevel@tonic-gate }
15077c478bd9Sstevel@tonic-gate
15087c478bd9Sstevel@tonic-gate if (!bound && tpri > dp->disp_max_unbound_pri) {
15097c478bd9Sstevel@tonic-gate if (tp == curthread && dp->disp_max_unbound_pri == -1 &&
15107c478bd9Sstevel@tonic-gate cp == CPU) {
15117c478bd9Sstevel@tonic-gate /*
15127c478bd9Sstevel@tonic-gate * If there are no other unbound threads on the
15137c478bd9Sstevel@tonic-gate * run queue, don't allow other CPUs to steal
15147c478bd9Sstevel@tonic-gate * this thread while we are in the middle of a
15157c478bd9Sstevel@tonic-gate * context switch. We may just switch to it
15167c478bd9Sstevel@tonic-gate * again right away. CPU_DISP_DONTSTEAL is cleared
15177c478bd9Sstevel@tonic-gate * in swtch and swtch_to.
15187c478bd9Sstevel@tonic-gate */
15197c478bd9Sstevel@tonic-gate cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL;
15207c478bd9Sstevel@tonic-gate }
15217c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri;
15227c478bd9Sstevel@tonic-gate }
15237c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, bound);
15247c478bd9Sstevel@tonic-gate }
15257c478bd9Sstevel@tonic-gate
15267c478bd9Sstevel@tonic-gate /*
15277c478bd9Sstevel@tonic-gate * Put a high-priority unbound thread on the kp queue
15287c478bd9Sstevel@tonic-gate */
15297c478bd9Sstevel@tonic-gate static void
15307c478bd9Sstevel@tonic-gate setkpdq(kthread_t *tp, int borf)
15317c478bd9Sstevel@tonic-gate {
15327c478bd9Sstevel@tonic-gate dispq_t *dq;
15337c478bd9Sstevel@tonic-gate disp_t *dp;
15347c478bd9Sstevel@tonic-gate cpu_t *cp;
15357c478bd9Sstevel@tonic-gate pri_t tpri;
15367c478bd9Sstevel@tonic-gate
15377c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp);
15387c478bd9Sstevel@tonic-gate
15397c478bd9Sstevel@tonic-gate dp = &tp->t_cpupart->cp_kp_queue;
15407c478bd9Sstevel@tonic-gate disp_lock_enter_high(&dp->disp_lock);
15417c478bd9Sstevel@tonic-gate
15427c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp);
15437c478bd9Sstevel@tonic-gate
15447c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri);
15457c478bd9Sstevel@tonic-gate DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, borf);
15467c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &dp->disp_lock); /* set t_state to TS_RUN */
15477c478bd9Sstevel@tonic-gate tp->t_disp_queue = dp;
15487c478bd9Sstevel@tonic-gate dp->disp_nrunnable++;
15497c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri];
15507c478bd9Sstevel@tonic-gate
15517c478bd9Sstevel@tonic-gate if (dq->dq_sruncnt++ != 0) {
15527c478bd9Sstevel@tonic-gate if (borf == SETKP_BACK) {
15537c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first != NULL);
15547c478bd9Sstevel@tonic-gate tp->t_link = NULL;
15557c478bd9Sstevel@tonic-gate dq->dq_last->t_link = tp;
15567c478bd9Sstevel@tonic-gate dq->dq_last = tp;
15577c478bd9Sstevel@tonic-gate } else {
15587c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last != NULL);
15597c478bd9Sstevel@tonic-gate tp->t_link = dq->dq_first;
15607c478bd9Sstevel@tonic-gate dq->dq_first = tp;
15617c478bd9Sstevel@tonic-gate }
15627c478bd9Sstevel@tonic-gate } else {
15637c478bd9Sstevel@tonic-gate if (borf == SETKP_BACK) {
15647c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL);
15657c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL);
15667c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp;
15677c478bd9Sstevel@tonic-gate } else {
15687c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL);
15697c478bd9Sstevel@tonic-gate ASSERT(dq->dq_first == NULL);
15707c478bd9Sstevel@tonic-gate tp->t_link = NULL;
15717c478bd9Sstevel@tonic-gate dq->dq_first = dq->dq_last = tp;
15727c478bd9Sstevel@tonic-gate }
15737c478bd9Sstevel@tonic-gate BT_SET(dp->disp_qactmap, tpri);
15747c478bd9Sstevel@tonic-gate if (tpri > dp->disp_max_unbound_pri)
15757c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri;
15767c478bd9Sstevel@tonic-gate if (tpri > dp->disp_maxrunpri) {
15777c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = tpri;
15787c478bd9Sstevel@tonic-gate membar_enter();
15797c478bd9Sstevel@tonic-gate }
15807c478bd9Sstevel@tonic-gate }
15817c478bd9Sstevel@tonic-gate
15827c478bd9Sstevel@tonic-gate cp = tp->t_cpu;
15837c478bd9Sstevel@tonic-gate if (tp->t_cpupart != cp->cpu_part) {
15847c478bd9Sstevel@tonic-gate /* migrate to a cpu in the new partition */
15857c478bd9Sstevel@tonic-gate cp = tp->t_cpupart->cp_cpulist;
15867c478bd9Sstevel@tonic-gate }
1587455e370cSJohn Levon cp = disp_lowpri_cpu(cp, tp, tp->t_pri);
15887c478bd9Sstevel@tonic-gate disp_lock_enter_high(&cp->cpu_disp->disp_lock);
15897c478bd9Sstevel@tonic-gate ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0);
15907c478bd9Sstevel@tonic-gate
15917c478bd9Sstevel@tonic-gate if (cp->cpu_chosen_level < tpri)
15927c478bd9Sstevel@tonic-gate cp->cpu_chosen_level = tpri;
15937c478bd9Sstevel@tonic-gate cpu_resched(cp, tpri);
15947c478bd9Sstevel@tonic-gate disp_lock_exit_high(&cp->cpu_disp->disp_lock);
15957c478bd9Sstevel@tonic-gate (*disp_enq_thread)(cp, 0);
15967c478bd9Sstevel@tonic-gate }
15977c478bd9Sstevel@tonic-gate
15987c478bd9Sstevel@tonic-gate /*
15997c478bd9Sstevel@tonic-gate * Remove a thread from the dispatcher queue if it is on it.
16007c478bd9Sstevel@tonic-gate * It is not an error if it is not found but we return whether
16017c478bd9Sstevel@tonic-gate * or not it was found in case the caller wants to check.
16027c478bd9Sstevel@tonic-gate */
16037c478bd9Sstevel@tonic-gate int
16047c478bd9Sstevel@tonic-gate dispdeq(kthread_t *tp)
16057c478bd9Sstevel@tonic-gate {
16067c478bd9Sstevel@tonic-gate disp_t *dp;
16077c478bd9Sstevel@tonic-gate dispq_t *dq;
16087c478bd9Sstevel@tonic-gate kthread_t *rp;
16097c478bd9Sstevel@tonic-gate kthread_t *trp;
16107c478bd9Sstevel@tonic-gate kthread_t **ptp;
16117c478bd9Sstevel@tonic-gate int tpri;
16127c478bd9Sstevel@tonic-gate
16137c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
16147c478bd9Sstevel@tonic-gate
16157c478bd9Sstevel@tonic-gate if (tp->t_state != TS_RUN)
16167c478bd9Sstevel@tonic-gate return (0);
16177c478bd9Sstevel@tonic-gate
16187c478bd9Sstevel@tonic-gate /*
16197c478bd9Sstevel@tonic-gate * The thread is "swapped" or is on the swap queue and
16207c478bd9Sstevel@tonic-gate * hence no longer on the run queue, so return true.
16217c478bd9Sstevel@tonic-gate */
16227c478bd9Sstevel@tonic-gate if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD)
16237c478bd9Sstevel@tonic-gate return (1);
16247c478bd9Sstevel@tonic-gate
16257c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp);
16267c478bd9Sstevel@tonic-gate dp = tp->t_disp_queue;
16277c478bd9Sstevel@tonic-gate ASSERT(tpri < dp->disp_npri);
16287c478bd9Sstevel@tonic-gate dq = &dp->disp_q[tpri];
16297c478bd9Sstevel@tonic-gate ptp = &dq->dq_first;
16307c478bd9Sstevel@tonic-gate rp = *ptp;
16317c478bd9Sstevel@tonic-gate trp = NULL;
16327c478bd9Sstevel@tonic-gate
16337c478bd9Sstevel@tonic-gate ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL);
16347c478bd9Sstevel@tonic-gate
16357c478bd9Sstevel@tonic-gate /*
16367c478bd9Sstevel@tonic-gate * Search for thread in queue.
16377c478bd9Sstevel@tonic-gate * Double links would simplify this at the expense of disp/setrun.
16387c478bd9Sstevel@tonic-gate */
16397c478bd9Sstevel@tonic-gate while (rp != tp && rp != NULL) {
16407c478bd9Sstevel@tonic-gate trp = rp;
16417c478bd9Sstevel@tonic-gate ptp = &trp->t_link;
16427c478bd9Sstevel@tonic-gate rp = trp->t_link;
16437c478bd9Sstevel@tonic-gate }
16447c478bd9Sstevel@tonic-gate
16457c478bd9Sstevel@tonic-gate if (rp == NULL) {
16467c478bd9Sstevel@tonic-gate panic("dispdeq: thread not on queue");
16477c478bd9Sstevel@tonic-gate }
16487c478bd9Sstevel@tonic-gate
16497c478bd9Sstevel@tonic-gate DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp);
16507c478bd9Sstevel@tonic-gate
16517c478bd9Sstevel@tonic-gate /*
16527c478bd9Sstevel@tonic-gate * Found it so remove it from queue.
16537c478bd9Sstevel@tonic-gate */
16547c478bd9Sstevel@tonic-gate if ((*ptp = rp->t_link) == NULL)
16557c478bd9Sstevel@tonic-gate dq->dq_last = trp;
16567c478bd9Sstevel@tonic-gate
16577c478bd9Sstevel@tonic-gate dp->disp_nrunnable--;
16587c478bd9Sstevel@tonic-gate if (--dq->dq_sruncnt == 0) {
16597c478bd9Sstevel@tonic-gate dp->disp_qactmap[tpri >> BT_ULSHIFT] &= ~BT_BIW(tpri);
16607c478bd9Sstevel@tonic-gate if (dp->disp_nrunnable == 0) {
16617c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = -1;
16627c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = -1;
16637c478bd9Sstevel@tonic-gate } else if (tpri == dp->disp_maxrunpri) {
16647c478bd9Sstevel@tonic-gate int ipri;
16657c478bd9Sstevel@tonic-gate
16667c478bd9Sstevel@tonic-gate ipri = bt_gethighbit(dp->disp_qactmap,
16677c478bd9Sstevel@tonic-gate dp->disp_maxrunpri >> BT_ULSHIFT);
16687c478bd9Sstevel@tonic-gate if (ipri < dp->disp_max_unbound_pri)
16697c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = ipri;
16707c478bd9Sstevel@tonic-gate dp->disp_maxrunpri = ipri;
16717c478bd9Sstevel@tonic-gate }
16727c478bd9Sstevel@tonic-gate }
16737c478bd9Sstevel@tonic-gate tp->t_link = NULL;
16747c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp); /* put in intermediate state */
16757c478bd9Sstevel@tonic-gate return (1);
16767c478bd9Sstevel@tonic-gate }
16777c478bd9Sstevel@tonic-gate
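/*
 * Sketch of the classic dispdeq() pattern: changing the priority of a
 * possibly-queued thread (hypothetical; cf. thread_change_pri()):
 *
 *	ASSERT(THREAD_LOCK_HELD(tp));
 *	on_rq = dispdeq(tp);		1 if tp came off a queue
 *	tp->t_pri = newpri;
 *	if (on_rq)
 *		setbackdq(tp);		requeue at the new priority
 */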
16787c478bd9Sstevel@tonic-gate
16797c478bd9Sstevel@tonic-gate /*
16807c478bd9Sstevel@tonic-gate * dq_sruninc and dq_srundec are public functions for
16817c478bd9Sstevel@tonic-gate * incrementing/decrementing the sruncnts when a thread on
16827c478bd9Sstevel@tonic-gate * a dispatcher queue is made schedulable/unschedulable by
16837c478bd9Sstevel@tonic-gate * resetting the TS_LOAD flag.
16847c478bd9Sstevel@tonic-gate *
16857c478bd9Sstevel@tonic-gate * The caller MUST have the thread lock and therefore the dispatcher
16867c478bd9Sstevel@tonic-gate * queue lock so that the operation which changes
16877c478bd9Sstevel@tonic-gate * the flag, the operation that checks the status of the thread to
16887c478bd9Sstevel@tonic-gate * determine if it's on a disp queue AND the call to this function
16897c478bd9Sstevel@tonic-gate * are one atomic operation with respect to interrupts.
16907c478bd9Sstevel@tonic-gate */
16917c478bd9Sstevel@tonic-gate
16927c478bd9Sstevel@tonic-gate /*
16937c478bd9Sstevel@tonic-gate * Called by sched AFTER TS_LOAD flag is set on a swapped, runnable thread.
16947c478bd9Sstevel@tonic-gate */
16957c478bd9Sstevel@tonic-gate void
16967c478bd9Sstevel@tonic-gate dq_sruninc(kthread_t *t)
16977c478bd9Sstevel@tonic-gate {
16987c478bd9Sstevel@tonic-gate ASSERT(t->t_state == TS_RUN);
16997c478bd9Sstevel@tonic-gate ASSERT(t->t_schedflag & TS_LOAD);
17007c478bd9Sstevel@tonic-gate
17017c478bd9Sstevel@tonic-gate THREAD_TRANSITION(t);
17027c478bd9Sstevel@tonic-gate setfrontdq(t);
17037c478bd9Sstevel@tonic-gate }
17047c478bd9Sstevel@tonic-gate
17057c478bd9Sstevel@tonic-gate /*
17067c478bd9Sstevel@tonic-gate * See comment on calling conventions above.
17077c478bd9Sstevel@tonic-gate * Called by sched BEFORE TS_LOAD flag is cleared on a runnable thread.
17087c478bd9Sstevel@tonic-gate */
17097c478bd9Sstevel@tonic-gate void
17107c478bd9Sstevel@tonic-gate dq_srundec(kthread_t *t)
17117c478bd9Sstevel@tonic-gate {
17127c478bd9Sstevel@tonic-gate ASSERT(t->t_schedflag & TS_LOAD);
17137c478bd9Sstevel@tonic-gate
17147c478bd9Sstevel@tonic-gate (void) dispdeq(t);
17157c478bd9Sstevel@tonic-gate disp_swapped_enq(t);
17167c478bd9Sstevel@tonic-gate }
17177c478bd9Sstevel@tonic-gate
17187c478bd9Sstevel@tonic-gate /*
17197c478bd9Sstevel@tonic-gate * Change the dispatcher lock of thread to the "swapped_lock"
17207c478bd9Sstevel@tonic-gate * and return with thread lock still held.
17217c478bd9Sstevel@tonic-gate *
17227c478bd9Sstevel@tonic-gate * Called with thread_lock held, in transition state, and at high spl.
17237c478bd9Sstevel@tonic-gate */
17247c478bd9Sstevel@tonic-gate void
17257c478bd9Sstevel@tonic-gate disp_swapped_enq(kthread_t *tp)
17267c478bd9Sstevel@tonic-gate {
17277c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
17287c478bd9Sstevel@tonic-gate ASSERT(tp->t_schedflag & TS_LOAD);
17297c478bd9Sstevel@tonic-gate
17307c478bd9Sstevel@tonic-gate switch (tp->t_state) {
17317c478bd9Sstevel@tonic-gate case TS_RUN:
17327c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock);
17337c478bd9Sstevel@tonic-gate THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */
17347c478bd9Sstevel@tonic-gate break;
17357c478bd9Sstevel@tonic-gate case TS_ONPROC:
17367c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock);
17377c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp);
17387c478bd9Sstevel@tonic-gate wake_sched_sec = 1; /* tell clock to wake sched */
17397c478bd9Sstevel@tonic-gate THREAD_SWAP(tp, &swapped_lock); /* set TS_RUN state and lock */
17407c478bd9Sstevel@tonic-gate break;
17417c478bd9Sstevel@tonic-gate default:
17427c478bd9Sstevel@tonic-gate panic("disp_swapped_enq: tp: %p bad t_state", (void *)tp);
17437c478bd9Sstevel@tonic-gate }
17447c478bd9Sstevel@tonic-gate }
17457c478bd9Sstevel@tonic-gate
17467c478bd9Sstevel@tonic-gate /*
17477c478bd9Sstevel@tonic-gate * This routine is called by setbackdq/setfrontdq if the thread is
17487c478bd9Sstevel@tonic-gate * not loaded, or is loaded but on the swap queue.
17497c478bd9Sstevel@tonic-gate *
17507c478bd9Sstevel@tonic-gate * Thread state TS_SLEEP implies that a swapped thread
17517c478bd9Sstevel@tonic-gate * has been woken up and needs to be swapped in by the swapper.
17527c478bd9Sstevel@tonic-gate *
17537c478bd9Sstevel@tonic-gate * Thread state TS_RUN implies that the priority of a swapped
17547c478bd9Sstevel@tonic-gate * thread is being increased by its scheduling class (e.g. ts_update).
17557c478bd9Sstevel@tonic-gate */
17567c478bd9Sstevel@tonic-gate static void
17577c478bd9Sstevel@tonic-gate disp_swapped_setrun(kthread_t *tp)
17587c478bd9Sstevel@tonic-gate {
17597c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
17607c478bd9Sstevel@tonic-gate ASSERT((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD);
17617c478bd9Sstevel@tonic-gate
17627c478bd9Sstevel@tonic-gate switch (tp->t_state) {
17637c478bd9Sstevel@tonic-gate case TS_SLEEP:
17647c478bd9Sstevel@tonic-gate disp_lock_enter_high(&swapped_lock);
17657c478bd9Sstevel@tonic-gate /*
17667c478bd9Sstevel@tonic-gate * Wakeup sched immediately (i.e., next tick) if the
17677c478bd9Sstevel@tonic-gate * thread priority is above maxclsyspri.
17687c478bd9Sstevel@tonic-gate */
17697c478bd9Sstevel@tonic-gate if (DISP_PRIO(tp) > maxclsyspri)
17707c478bd9Sstevel@tonic-gate wake_sched = 1;
17717c478bd9Sstevel@tonic-gate else
17727c478bd9Sstevel@tonic-gate wake_sched_sec = 1;
17737c478bd9Sstevel@tonic-gate THREAD_RUN(tp, &swapped_lock); /* set TS_RUN state and lock */
17747c478bd9Sstevel@tonic-gate break;
17757c478bd9Sstevel@tonic-gate case TS_RUN: /* called from ts_update */
17767c478bd9Sstevel@tonic-gate break;
17777c478bd9Sstevel@tonic-gate default:
17788793b36bSNick Todd panic("disp_swapped_setrun: tp: %p bad t_state", (void *)tp);
17797c478bd9Sstevel@tonic-gate }
17807c478bd9Sstevel@tonic-gate }
17817c478bd9Sstevel@tonic-gate
17827c478bd9Sstevel@tonic-gate /*
17837c478bd9Sstevel@tonic-gate * Make a thread give up its processor. Find the processor on
17847c478bd9Sstevel@tonic-gate * which this thread is executing, and have that processor
17857c478bd9Sstevel@tonic-gate * preempt.
178635a5a358SJonathan Adams *
178735a5a358SJonathan Adams * We allow System Duty Cycle (SDC) threads to be preempted even if
178835a5a358SJonathan Adams * they are running at kernel priorities. To implement this, we always
178935a5a358SJonathan Adams * set cpu_kprunrun; this ensures preempt() will be called. Since SDC
179035a5a358SJonathan Adams * calls cpu_surrender() very often, we only preempt if there is anyone
179135a5a358SJonathan Adams * competing with us.
17927c478bd9Sstevel@tonic-gate */
17937c478bd9Sstevel@tonic-gate void
17947c478bd9Sstevel@tonic-gate cpu_surrender(kthread_t *tp)
17957c478bd9Sstevel@tonic-gate {
17967c478bd9Sstevel@tonic-gate cpu_t *cpup;
17977c478bd9Sstevel@tonic-gate int max_pri;
17987c478bd9Sstevel@tonic-gate int max_run_pri;
17997c478bd9Sstevel@tonic-gate klwp_t *lwp;
18007c478bd9Sstevel@tonic-gate
18017c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
18027c478bd9Sstevel@tonic-gate
18037c478bd9Sstevel@tonic-gate if (tp->t_state != TS_ONPROC)
18047c478bd9Sstevel@tonic-gate return;
18057c478bd9Sstevel@tonic-gate cpup = tp->t_disp_queue->disp_cpu; /* CPU thread dispatched to */
18067c478bd9Sstevel@tonic-gate max_pri = cpup->cpu_disp->disp_maxrunpri; /* best pri of that CPU */
18077c478bd9Sstevel@tonic-gate max_run_pri = CP_MAXRUNPRI(cpup->cpu_part);
18087c478bd9Sstevel@tonic-gate if (max_pri < max_run_pri)
18097c478bd9Sstevel@tonic-gate max_pri = max_run_pri;
18107c478bd9Sstevel@tonic-gate
181135a5a358SJonathan Adams if (tp->t_cid == sysdccid) {
181235a5a358SJonathan Adams uint_t t_pri = DISP_PRIO(tp);
181335a5a358SJonathan Adams if (t_pri > max_pri)
181435a5a358SJonathan Adams return; /* we are not competing w/ anyone */
181535a5a358SJonathan Adams cpup->cpu_runrun = cpup->cpu_kprunrun = 1;
181635a5a358SJonathan Adams } else {
18177c478bd9Sstevel@tonic-gate cpup->cpu_runrun = 1;
18187c478bd9Sstevel@tonic-gate if (max_pri >= kpreemptpri && cpup->cpu_kprunrun == 0) {
18197c478bd9Sstevel@tonic-gate cpup->cpu_kprunrun = 1;
18207c478bd9Sstevel@tonic-gate }
182135a5a358SJonathan Adams }
18227c478bd9Sstevel@tonic-gate
18237c478bd9Sstevel@tonic-gate /*
18247c478bd9Sstevel@tonic-gate * Propagate cpu_runrun, and cpu_kprunrun to global visibility.
18257c478bd9Sstevel@tonic-gate */
18267c478bd9Sstevel@tonic-gate membar_enter();
18277c478bd9Sstevel@tonic-gate
18287c478bd9Sstevel@tonic-gate DTRACE_SCHED1(surrender, kthread_t *, tp);
18297c478bd9Sstevel@tonic-gate
18307c478bd9Sstevel@tonic-gate /*
18317c478bd9Sstevel@tonic-gate * Make the target thread take an excursion through trap()
18327c478bd9Sstevel@tonic-gate * to do preempt() (unless we're already in trap or post_syscall,
18337c478bd9Sstevel@tonic-gate * calling cpu_surrender via CL_TRAPRET).
18347c478bd9Sstevel@tonic-gate */
18357c478bd9Sstevel@tonic-gate if (tp != curthread || (lwp = tp->t_lwp) == NULL ||
18367c478bd9Sstevel@tonic-gate lwp->lwp_state != LWP_USER) {
18377c478bd9Sstevel@tonic-gate aston(tp);
18387c478bd9Sstevel@tonic-gate if (cpup != CPU)
18397c478bd9Sstevel@tonic-gate poke_cpu(cpup->cpu_id);
18407c478bd9Sstevel@tonic-gate }
18417c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_DISP, TR_CPU_SURRENDER,
18427c478bd9Sstevel@tonic-gate "cpu_surrender:tid %p cpu %p", tp, cpup);
18437c478bd9Sstevel@tonic-gate }
18447c478bd9Sstevel@tonic-gate
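/*
 * Sketch of a typical caller (hypothetical; the scheduling classes do
 * this from their tick and trap-return handlers):
 *
 *	thread_lock(tp);		tp is ONPROC on some CPU
 *	tp->t_pri = newpri;		quantum expired, priority drops
 *	cpu_surrender(tp);		mark tp's CPU for preemption
 *	thread_unlock(tp);
 *
 * Nothing is preempted here; the flagged CPU notices cpu_runrun or
 * cpu_kprunrun on its next pass through trap() and calls preempt().
 */
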
18457c478bd9Sstevel@tonic-gate /*
18467c478bd9Sstevel@tonic-gate * Commit to and ratify a scheduling decision
18477c478bd9Sstevel@tonic-gate */
18487c478bd9Sstevel@tonic-gate /*ARGSUSED*/
18497c478bd9Sstevel@tonic-gate static kthread_t *
18507c478bd9Sstevel@tonic-gate disp_ratify(kthread_t *tp, disp_t *kpq)
18517c478bd9Sstevel@tonic-gate {
18527c478bd9Sstevel@tonic-gate pri_t tpri, maxpri;
18537c478bd9Sstevel@tonic-gate pri_t maxkpri;
18547c478bd9Sstevel@tonic-gate cpu_t *cpup;
18557c478bd9Sstevel@tonic-gate
18567c478bd9Sstevel@tonic-gate ASSERT(tp != NULL);
18577c478bd9Sstevel@tonic-gate /*
18587c478bd9Sstevel@tonic-gate * Commit to, then ratify scheduling decision
18597c478bd9Sstevel@tonic-gate */
18607c478bd9Sstevel@tonic-gate cpup = CPU;
18617c478bd9Sstevel@tonic-gate if (cpup->cpu_runrun != 0)
18627c478bd9Sstevel@tonic-gate cpup->cpu_runrun = 0;
18637c478bd9Sstevel@tonic-gate if (cpup->cpu_kprunrun != 0)
18647c478bd9Sstevel@tonic-gate cpup->cpu_kprunrun = 0;
18657c478bd9Sstevel@tonic-gate if (cpup->cpu_chosen_level != -1)
18667c478bd9Sstevel@tonic-gate cpup->cpu_chosen_level = -1;
18677c478bd9Sstevel@tonic-gate membar_enter();
18687c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp);
18697c478bd9Sstevel@tonic-gate maxpri = cpup->cpu_disp->disp_maxrunpri;
18707c478bd9Sstevel@tonic-gate maxkpri = kpq->disp_maxrunpri;
18717c478bd9Sstevel@tonic-gate if (maxpri < maxkpri)
18727c478bd9Sstevel@tonic-gate maxpri = maxkpri;
18737c478bd9Sstevel@tonic-gate if (tpri < maxpri) {
18747c478bd9Sstevel@tonic-gate /*
18757c478bd9Sstevel@tonic-gate * should have done better
18767c478bd9Sstevel@tonic-gate * put this one back and indicate to try again
18777c478bd9Sstevel@tonic-gate */
18787c478bd9Sstevel@tonic-gate cpup->cpu_dispthread = curthread; /* fixup dispthread */
18797c478bd9Sstevel@tonic-gate cpup->cpu_dispatch_pri = DISP_PRIO(curthread);
18807c478bd9Sstevel@tonic-gate thread_lock_high(tp);
18817c478bd9Sstevel@tonic-gate THREAD_TRANSITION(tp);
18827c478bd9Sstevel@tonic-gate setfrontdq(tp);
18837c478bd9Sstevel@tonic-gate thread_unlock_nopreempt(tp);
18847c478bd9Sstevel@tonic-gate
18857c478bd9Sstevel@tonic-gate tp = NULL;
18867c478bd9Sstevel@tonic-gate }
18877c478bd9Sstevel@tonic-gate return (tp);
18887c478bd9Sstevel@tonic-gate }
18897c478bd9Sstevel@tonic-gate
18907c478bd9Sstevel@tonic-gate /*
18917c478bd9Sstevel@tonic-gate * See if there is any work on the dispatcher queue for other CPUs.
18927c478bd9Sstevel@tonic-gate * If there is, dequeue the best thread and return.
18937c478bd9Sstevel@tonic-gate */
18947c478bd9Sstevel@tonic-gate static kthread_t *
18957c478bd9Sstevel@tonic-gate disp_getwork(cpu_t *cp)
18967c478bd9Sstevel@tonic-gate {
18977c478bd9Sstevel@tonic-gate cpu_t *ocp; /* other CPU */
18987c478bd9Sstevel@tonic-gate cpu_t *ocp_start;
18997c478bd9Sstevel@tonic-gate cpu_t *tcp; /* target local CPU */
19007c478bd9Sstevel@tonic-gate kthread_t *tp;
1901685679f7Sakolb kthread_t *retval = NULL;
19027c478bd9Sstevel@tonic-gate pri_t maxpri;
19037c478bd9Sstevel@tonic-gate disp_t *kpq; /* kp queue for this partition */
19047c478bd9Sstevel@tonic-gate lpl_t *lpl, *lpl_leaf;
19056890d023SEric Saxe int leafidx, startidx;
1906685679f7Sakolb hrtime_t stealtime;
19076890d023SEric Saxe lgrp_id_t local_id;
19087c478bd9Sstevel@tonic-gate
19097c478bd9Sstevel@tonic-gate maxpri = -1;
19107c478bd9Sstevel@tonic-gate tcp = NULL;
19117c478bd9Sstevel@tonic-gate
19127c478bd9Sstevel@tonic-gate kpq = &cp->cpu_part->cp_kp_queue;
19137c478bd9Sstevel@tonic-gate while (kpq->disp_maxrunpri >= 0) {
19147c478bd9Sstevel@tonic-gate /*
19157c478bd9Sstevel@tonic-gate * Try to take a thread from the kp_queue.
19167c478bd9Sstevel@tonic-gate */
19177c478bd9Sstevel@tonic-gate tp = disp_getbest(kpq);
19187c478bd9Sstevel@tonic-gate if (tp)
19197c478bd9Sstevel@tonic-gate return (disp_ratify(tp, kpq));
19207c478bd9Sstevel@tonic-gate }
19217c478bd9Sstevel@tonic-gate
1922ab761399Sesaxe kpreempt_disable(); /* protect the cpu_active list */
19237c478bd9Sstevel@tonic-gate
19247c478bd9Sstevel@tonic-gate /*
19257c478bd9Sstevel@tonic-gate * Try to find something to do on another CPU's run queue.
19267c478bd9Sstevel@tonic-gate * Loop through all other CPUs looking for the one with the highest
19277c478bd9Sstevel@tonic-gate * priority unbound thread.
19287c478bd9Sstevel@tonic-gate *
19297c478bd9Sstevel@tonic-gate * On NUMA machines, the partition's CPUs are consulted in order of
19307c478bd9Sstevel@tonic-gate * distance from the current CPU. This way, the first available
19317c478bd9Sstevel@tonic-gate * work found is also the closest, and will suffer the least
19327c478bd9Sstevel@tonic-gate * from being migrated.
19337c478bd9Sstevel@tonic-gate */
19347c478bd9Sstevel@tonic-gate lpl = lpl_leaf = cp->cpu_lpl;
19356890d023SEric Saxe local_id = lpl_leaf->lpl_lgrpid;
19366890d023SEric Saxe leafidx = startidx = 0;
19377c478bd9Sstevel@tonic-gate
19387c478bd9Sstevel@tonic-gate /*
19397c478bd9Sstevel@tonic-gate * This loop traverses the lpl hierarchy. Higher level lpls represent
19407c478bd9Sstevel@tonic-gate * broader levels of locality
19417c478bd9Sstevel@tonic-gate */
19427c478bd9Sstevel@tonic-gate do {
19437c478bd9Sstevel@tonic-gate /* This loop iterates over the lpl's leaves */
19447c478bd9Sstevel@tonic-gate do {
19457c478bd9Sstevel@tonic-gate if (lpl_leaf != cp->cpu_lpl)
19467c478bd9Sstevel@tonic-gate ocp = lpl_leaf->lpl_cpus;
19477c478bd9Sstevel@tonic-gate else
19487c478bd9Sstevel@tonic-gate ocp = cp->cpu_next_lpl;
19497c478bd9Sstevel@tonic-gate
19507c478bd9Sstevel@tonic-gate /* This loop iterates over the CPUs in the leaf */
19517c478bd9Sstevel@tonic-gate ocp_start = ocp;
19527c478bd9Sstevel@tonic-gate do {
19537c478bd9Sstevel@tonic-gate pri_t pri;
19547c478bd9Sstevel@tonic-gate
19557c478bd9Sstevel@tonic-gate ASSERT(CPU_ACTIVE(ocp));
19567c478bd9Sstevel@tonic-gate
19577c478bd9Sstevel@tonic-gate /*
195839bac370Sesaxe * End our stroll around this lpl if:
19597c478bd9Sstevel@tonic-gate *
19607c478bd9Sstevel@tonic-gate * - Something became runnable on the local
196139bac370Sesaxe * queue...which also ends our stroll around
196239bac370Sesaxe * the partition.
19637c478bd9Sstevel@tonic-gate *
196439bac370Sesaxe * - We happen across another idle CPU.
196539bac370Sesaxe * Since it is patrolling the next portion
196639bac370Sesaxe * of the lpl's list (assuming it's not
19676890d023SEric Saxe * halted, or busy servicing an interrupt),
19686890d023SEric Saxe * move to the next higher level of locality.
19697c478bd9Sstevel@tonic-gate */
197039bac370Sesaxe if (cp->cpu_disp->disp_nrunnable != 0) {
197139bac370Sesaxe kpreempt_enable();
197239bac370Sesaxe return (NULL);
197339bac370Sesaxe }
19747c478bd9Sstevel@tonic-gate if (ocp->cpu_dispatch_pri == -1) {
19757c478bd9Sstevel@tonic-gate if (ocp->cpu_disp_flags &
19766890d023SEric Saxe CPU_DISP_HALTED ||
19776890d023SEric Saxe ocp->cpu_intr_actv != 0)
19787c478bd9Sstevel@tonic-gate continue;
197939bac370Sesaxe else
19806890d023SEric Saxe goto next_level;
19817c478bd9Sstevel@tonic-gate }
19827c478bd9Sstevel@tonic-gate
19837c478bd9Sstevel@tonic-gate /*
19847c478bd9Sstevel@tonic-gate * If there's only one thread and the CPU
19857c478bd9Sstevel@tonic-gate * is in the middle of a context switch,
19867c478bd9Sstevel@tonic-gate * or it's currently running the idle thread,
19877c478bd9Sstevel@tonic-gate * don't steal it.
19887c478bd9Sstevel@tonic-gate */
19897c478bd9Sstevel@tonic-gate if ((ocp->cpu_disp_flags &
19907c478bd9Sstevel@tonic-gate CPU_DISP_DONTSTEAL) &&
19917c478bd9Sstevel@tonic-gate ocp->cpu_disp->disp_nrunnable == 1)
19927c478bd9Sstevel@tonic-gate continue;
19937c478bd9Sstevel@tonic-gate
19947c478bd9Sstevel@tonic-gate pri = ocp->cpu_disp->disp_max_unbound_pri;
19957c478bd9Sstevel@tonic-gate if (pri > maxpri) {
1996685679f7Sakolb /*
1997685679f7Sakolb * Don't steal threads that we attempted
1998fb2f18f8Sesaxe * to steal recently until they're ready
1999fb2f18f8Sesaxe * to be stolen again.
2000685679f7Sakolb */
2001685679f7Sakolb stealtime = ocp->cpu_disp->disp_steal;
2002685679f7Sakolb if (stealtime == 0 ||
2003685679f7Sakolb stealtime - gethrtime() <= 0) {
20047c478bd9Sstevel@tonic-gate maxpri = pri;
20057c478bd9Sstevel@tonic-gate tcp = ocp;
2006685679f7Sakolb } else {
2007685679f7Sakolb /*
2008685679f7Sakolb * Don't update tcp, just set
2009685679f7Sakolb * the retval to T_DONTSTEAL, so
2010685679f7Sakolb * that if no acceptable CPUs
2011685679f7Sakolb * are found the return value
2012685679f7Sakolb * will be T_DONTSTEAL rather
2013685679f7Sakolb * than NULL.
2014685679f7Sakolb */
2015685679f7Sakolb retval = T_DONTSTEAL;
2016685679f7Sakolb }
20177c478bd9Sstevel@tonic-gate }
20187c478bd9Sstevel@tonic-gate } while ((ocp = ocp->cpu_next_lpl) != ocp_start);
20197c478bd9Sstevel@tonic-gate
20206890d023SEric Saxe /*
20216890d023SEric Saxe * Iterate to the next leaf lpl in the resource set
20226890d023SEric Saxe * at this level of locality. If we hit the end of
20236890d023SEric Saxe * the set, wrap back around to the beginning.
20246890d023SEric Saxe *
20256890d023SEric Saxe * Note: This iteration is NULL terminated for a reason;
20266890d023SEric Saxe * see lpl_topo_bootstrap() in lgrp.c for details.
20276890d023SEric Saxe */
20287c478bd9Sstevel@tonic-gate if ((lpl_leaf = lpl->lpl_rset[++leafidx]) == NULL) {
20297c478bd9Sstevel@tonic-gate leafidx = 0;
20307c478bd9Sstevel@tonic-gate lpl_leaf = lpl->lpl_rset[leafidx];
20317c478bd9Sstevel@tonic-gate }
20326890d023SEric Saxe } while (leafidx != startidx);
20337c478bd9Sstevel@tonic-gate
20346890d023SEric Saxe next_level:
20356890d023SEric Saxe /*
20366890d023SEric Saxe * Expand the search to include farther away CPUs (next
20376890d023SEric Saxe * locality level). The closer CPUs that have already been
20386890d023SEric Saxe * checked will be checked again. In doing so, idle CPUs
20396890d023SEric Saxe * will tend to be more aggressive about stealing from CPUs
20406890d023SEric Saxe * that are closer (since the closer CPUs will be considered
20416890d023SEric Saxe * more often).
20426890d023SEric Saxe * Begin at this level with the CPU's local leaf lpl.
20436890d023SEric Saxe */
20446890d023SEric Saxe if ((lpl = lpl->lpl_parent) != NULL) {
20456890d023SEric Saxe leafidx = startidx = lpl->lpl_id2rset[local_id];
20466890d023SEric Saxe lpl_leaf = lpl->lpl_rset[leafidx];
20476890d023SEric Saxe }
20487c478bd9Sstevel@tonic-gate } while (!tcp && lpl);
20497c478bd9Sstevel@tonic-gate
2050ab761399Sesaxe kpreempt_enable();
20517c478bd9Sstevel@tonic-gate
20527c478bd9Sstevel@tonic-gate /*
20537c478bd9Sstevel@tonic-gate * If another queue looks good, and there is still nothing on
20547c478bd9Sstevel@tonic-gate * the local queue, try to transfer one or more threads
20557c478bd9Sstevel@tonic-gate * from it to our queue.
20567c478bd9Sstevel@tonic-gate */
20577c478bd9Sstevel@tonic-gate if (tcp && cp->cpu_disp->disp_nrunnable == 0) {
2058685679f7Sakolb tp = disp_getbest(tcp->cpu_disp);
2059685679f7Sakolb if (tp == NULL || tp == T_DONTSTEAL)
2060685679f7Sakolb return (tp);
20617c478bd9Sstevel@tonic-gate return (disp_ratify(tp, kpq));
20627c478bd9Sstevel@tonic-gate }
2063685679f7Sakolb return (retval);
20647c478bd9Sstevel@tonic-gate }
20657c478bd9Sstevel@tonic-gate
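/*
 * Example of the resulting search order (topology assumed for
 * illustration): on a machine with two leaf lpls, A holding CPUs 0-3
 * and B holding CPUs 4-7, an idle CPU 0 first patrols CPUs 1-3 in its
 * own leaf, then climbs to the parent lpl and patrols leaf A again
 * followed by leaf B.  The first acceptable queue found is therefore
 * also the closest, which bounds the cost of the migration that
 * follows the steal.
 */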
20667c478bd9Sstevel@tonic-gate
20677c478bd9Sstevel@tonic-gate /*
20687c478bd9Sstevel@tonic-gate * disp_fix_unbound_pri()
20697c478bd9Sstevel@tonic-gate * Determines the maximum priority of unbound threads on the queue.
20707c478bd9Sstevel@tonic-gate * The priority is kept for the queue, but is only increased, never
20717c478bd9Sstevel@tonic-gate * reduced unless some CPU is looking for something on that queue.
20727c478bd9Sstevel@tonic-gate *
20737c478bd9Sstevel@tonic-gate * The priority argument is the known upper limit.
20747c478bd9Sstevel@tonic-gate *
20757c478bd9Sstevel@tonic-gate * Perhaps this should be kept accurately, but that probably means
20767c478bd9Sstevel@tonic-gate * separate bitmaps for bound and unbound threads. Since only idled
20777c478bd9Sstevel@tonic-gate * CPUs will have to do this recalculation, it seems better this way.
20787c478bd9Sstevel@tonic-gate */
20797c478bd9Sstevel@tonic-gate static void
20807c478bd9Sstevel@tonic-gate disp_fix_unbound_pri(disp_t *dp, pri_t pri)
20817c478bd9Sstevel@tonic-gate {
20827c478bd9Sstevel@tonic-gate kthread_t *tp;
20837c478bd9Sstevel@tonic-gate dispq_t *dq;
20847c478bd9Sstevel@tonic-gate ulong_t *dqactmap = dp->disp_qactmap;
20857c478bd9Sstevel@tonic-gate ulong_t mapword;
20867c478bd9Sstevel@tonic-gate int wx;
20877c478bd9Sstevel@tonic-gate
20887c478bd9Sstevel@tonic-gate ASSERT(DISP_LOCK_HELD(&dp->disp_lock));
20897c478bd9Sstevel@tonic-gate
20907c478bd9Sstevel@tonic-gate ASSERT(pri >= 0); /* checked by caller */
20917c478bd9Sstevel@tonic-gate
20927c478bd9Sstevel@tonic-gate /*
20937c478bd9Sstevel@tonic-gate * Start the search at the next lowest priority below the supplied
20947c478bd9Sstevel@tonic-gate * priority. This depends on the bitmap implementation.
20957c478bd9Sstevel@tonic-gate */
20967c478bd9Sstevel@tonic-gate do {
20977c478bd9Sstevel@tonic-gate wx = pri >> BT_ULSHIFT; /* index of word in map */
20987c478bd9Sstevel@tonic-gate
20997c478bd9Sstevel@tonic-gate /*
21007c478bd9Sstevel@tonic-gate * Form mask for all lower priorities in the word.
21017c478bd9Sstevel@tonic-gate */
21027c478bd9Sstevel@tonic-gate mapword = dqactmap[wx] & (BT_BIW(pri) - 1);
21037c478bd9Sstevel@tonic-gate
21047c478bd9Sstevel@tonic-gate /*
21057c478bd9Sstevel@tonic-gate * Get next lower active priority.
21067c478bd9Sstevel@tonic-gate */
21077c478bd9Sstevel@tonic-gate if (mapword != 0) {
21087c478bd9Sstevel@tonic-gate pri = (wx << BT_ULSHIFT) + highbit(mapword) - 1;
21097c478bd9Sstevel@tonic-gate } else if (wx > 0) {
21107c478bd9Sstevel@tonic-gate pri = bt_gethighbit(dqactmap, wx - 1); /* sign extend */
21117c478bd9Sstevel@tonic-gate if (pri < 0)
21127c478bd9Sstevel@tonic-gate break;
21137c478bd9Sstevel@tonic-gate } else {
21147c478bd9Sstevel@tonic-gate pri = -1;
21157c478bd9Sstevel@tonic-gate break;
21167c478bd9Sstevel@tonic-gate }
21177c478bd9Sstevel@tonic-gate
21187c478bd9Sstevel@tonic-gate /*
21197c478bd9Sstevel@tonic-gate * Search the queue for unbound, runnable threads.
21207c478bd9Sstevel@tonic-gate */
21217c478bd9Sstevel@tonic-gate dq = &dp->disp_q[pri];
21227c478bd9Sstevel@tonic-gate tp = dq->dq_first;
21237c478bd9Sstevel@tonic-gate
21247c478bd9Sstevel@tonic-gate while (tp && (tp->t_bound_cpu || tp->t_weakbound_cpu)) {
21257c478bd9Sstevel@tonic-gate tp = tp->t_link;
21267c478bd9Sstevel@tonic-gate }
21277c478bd9Sstevel@tonic-gate
21287c478bd9Sstevel@tonic-gate /*
21297c478bd9Sstevel@tonic-gate * If a thread was found, set the priority and return.
21307c478bd9Sstevel@tonic-gate */
21317c478bd9Sstevel@tonic-gate } while (tp == NULL);
21327c478bd9Sstevel@tonic-gate
21337c478bd9Sstevel@tonic-gate /*
21347c478bd9Sstevel@tonic-gate * pri holds the maximum unbound thread priority or -1.
21357c478bd9Sstevel@tonic-gate */
21367c478bd9Sstevel@tonic-gate if (dp->disp_max_unbound_pri != pri)
21377c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = pri;
21387c478bd9Sstevel@tonic-gate }
21397c478bd9Sstevel@tonic-gate
21407c478bd9Sstevel@tonic-gate /*
21417c478bd9Sstevel@tonic-gate * disp_adjust_unbound_pri() - thread is becoming unbound, so we should
21427c478bd9Sstevel@tonic-gate  *	check if the CPU to which it was previously bound should have
21437c478bd9Sstevel@tonic-gate * its disp_max_unbound_pri increased.
21447c478bd9Sstevel@tonic-gate */
21457c478bd9Sstevel@tonic-gate void
21467c478bd9Sstevel@tonic-gate disp_adjust_unbound_pri(kthread_t *tp)
21477c478bd9Sstevel@tonic-gate {
21487c478bd9Sstevel@tonic-gate disp_t *dp;
21497c478bd9Sstevel@tonic-gate pri_t tpri;
21507c478bd9Sstevel@tonic-gate
21517c478bd9Sstevel@tonic-gate ASSERT(THREAD_LOCK_HELD(tp));
21527c478bd9Sstevel@tonic-gate
21537c478bd9Sstevel@tonic-gate /*
21547c478bd9Sstevel@tonic-gate * Don't do anything if the thread is not bound, or
21557c478bd9Sstevel@tonic-gate * currently not runnable or swapped out.
21567c478bd9Sstevel@tonic-gate */
21577c478bd9Sstevel@tonic-gate if (tp->t_bound_cpu == NULL ||
21587c478bd9Sstevel@tonic-gate tp->t_state != TS_RUN ||
21597c478bd9Sstevel@tonic-gate tp->t_schedflag & TS_ON_SWAPQ)
21607c478bd9Sstevel@tonic-gate return;
21617c478bd9Sstevel@tonic-gate
21627c478bd9Sstevel@tonic-gate tpri = DISP_PRIO(tp);
21637c478bd9Sstevel@tonic-gate dp = tp->t_bound_cpu->cpu_disp;
21647c478bd9Sstevel@tonic-gate ASSERT(tpri >= 0 && tpri < dp->disp_npri);
21657c478bd9Sstevel@tonic-gate if (tpri > dp->disp_max_unbound_pri)
21667c478bd9Sstevel@tonic-gate dp->disp_max_unbound_pri = tpri;
21677c478bd9Sstevel@tonic-gate }
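
/*
 * Editor's sketch of the intended effect (illustrative only): if a
 * priority-60 thread bound to CPU 3 is unbound while sitting on CPU 3's
 * run queue, and that queue's disp_max_unbound_pri is still, say, 45,
 * the value is raised to 60 here so that idle CPUs scanning for work
 * will consider stealing the thread immediately.
 */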
21687c478bd9Sstevel@tonic-gate
21697c478bd9Sstevel@tonic-gate /*
2170685679f7Sakolb * disp_getbest()
2171685679f7Sakolb * De-queue the highest priority unbound runnable thread.
2172685679f7Sakolb * Returns with the thread unlocked and onproc but at splhigh (like disp()).
2173685679f7Sakolb * Returns NULL if nothing found.
2174685679f7Sakolb  *	Returns T_DONTSTEAL if the thread was not stealable,
2175685679f7Sakolb  *	so that the caller will try again later.
21767c478bd9Sstevel@tonic-gate *
2177685679f7Sakolb  *	Passed a pointer to a dispatch queue not associated with this CPU.
21797c478bd9Sstevel@tonic-gate */
21807c478bd9Sstevel@tonic-gate static kthread_t *
21817c478bd9Sstevel@tonic-gate disp_getbest(disp_t *dp)
21827c478bd9Sstevel@tonic-gate {
21837c478bd9Sstevel@tonic-gate kthread_t *tp;
21847c478bd9Sstevel@tonic-gate dispq_t *dq;
21857c478bd9Sstevel@tonic-gate pri_t pri;
2186685679f7Sakolb cpu_t *cp, *tcp;
2187685679f7Sakolb boolean_t allbound;
21887c478bd9Sstevel@tonic-gate
21897c478bd9Sstevel@tonic-gate disp_lock_enter(&dp->disp_lock);
21907c478bd9Sstevel@tonic-gate
21917c478bd9Sstevel@tonic-gate /*
21927c478bd9Sstevel@tonic-gate * If there is nothing to run, or the CPU is in the middle of a
21937c478bd9Sstevel@tonic-gate * context switch of the only thread, return NULL.
21947c478bd9Sstevel@tonic-gate */
2195685679f7Sakolb tcp = dp->disp_cpu;
2196685679f7Sakolb cp = CPU;
21977c478bd9Sstevel@tonic-gate pri = dp->disp_max_unbound_pri;
21987c478bd9Sstevel@tonic-gate if (pri == -1 ||
2199685679f7Sakolb (tcp != NULL && (tcp->cpu_disp_flags & CPU_DISP_DONTSTEAL) &&
2200685679f7Sakolb tcp->cpu_disp->disp_nrunnable == 1)) {
22017c478bd9Sstevel@tonic-gate disp_lock_exit_nopreempt(&dp->disp_lock);
22027c478bd9Sstevel@tonic-gate return (NULL);
22037c478bd9Sstevel@tonic-gate }
22047c478bd9Sstevel@tonic-gate
22057c478bd9Sstevel@tonic-gate dq = &dp->disp_q[pri];
2206685679f7Sakolb
22087c478bd9Sstevel@tonic-gate /*
2209685679f7Sakolb 	 * Assume that all threads on this queue are bound, and clear
2210685679f7Sakolb 	 * the flag when we find one that is not.
22117c478bd9Sstevel@tonic-gate */
2212685679f7Sakolb allbound = B_TRUE;
2213685679f7Sakolb for (tp = dq->dq_first; tp != NULL; tp = tp->t_link) {
2214685679f7Sakolb hrtime_t now, nosteal, rqtime;
2215685679f7Sakolb
2216685679f7Sakolb /*
2217685679f7Sakolb * Skip over bound threads which could be here even
2218685679f7Sakolb * though disp_max_unbound_pri indicated this level.
2219685679f7Sakolb */
2220685679f7Sakolb if (tp->t_bound_cpu || tp->t_weakbound_cpu)
2221685679f7Sakolb continue;
2222685679f7Sakolb
2223685679f7Sakolb /*
2224685679f7Sakolb * We've got some unbound threads on this queue, so turn
2225685679f7Sakolb * the allbound flag off now.
2226685679f7Sakolb */
2227685679f7Sakolb allbound = B_FALSE;
2228685679f7Sakolb
2229685679f7Sakolb /*
2230685679f7Sakolb * The thread is a candidate for stealing from its run queue. We
2231685679f7Sakolb * don't want to steal threads that became runnable just a
2232685679f7Sakolb * moment ago. This improves CPU affinity for threads that get
2233685679f7Sakolb * preempted for short periods of time and go back on the run
2234685679f7Sakolb * queue.
2235685679f7Sakolb *
2236685679f7Sakolb * We want to let it stay on its run queue if it was only placed
2237685679f7Sakolb * there recently and it was running on the same CPU before that
2238685679f7Sakolb * to preserve its cache investment. For the thread to remain on
2239685679f7Sakolb * its run queue, ALL of the following conditions must be
2240685679f7Sakolb * satisfied:
2241685679f7Sakolb *
2242685679f7Sakolb * - the disp queue should not be the kernel preemption queue
2243685679f7Sakolb * - delayed idle stealing should not be disabled
2244685679f7Sakolb * - nosteal_nsec should be non-zero
2245685679f7Sakolb * - it should run with user priority
2246685679f7Sakolb * - it should be on the run queue of the CPU where it was
2247685679f7Sakolb * running before being placed on the run queue
2248685679f7Sakolb * - it should be the only thread on the run queue (to prevent
2249685679f7Sakolb * extra scheduling latency for other threads)
2250685679f7Sakolb 	 * - it should have been on the run queue for less than the
2251685679f7Sakolb 	 *   per-chip or global nosteal interval
2252685679f7Sakolb 	 * - for CPUs with shared caches, it should be on the run
2253685679f7Sakolb 	 *   queue of a CPU from a different chip
2254685679f7Sakolb *
2255685679f7Sakolb * The checks are arranged so that the ones that are faster are
2256685679f7Sakolb * placed earlier.
2257685679f7Sakolb */
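		/*
		 * Editor's note: the test below covers three of the
		 * conditions above cheaply: tcp is NULL for the
		 * partition-wide kernel preemption queue, a pri at or
		 * above minclsyspri is not a user priority, and t_cpu
		 * is the CPU on which the thread last ran.
		 */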
2258685679f7Sakolb if (tcp == NULL ||
2259685679f7Sakolb pri >= minclsyspri ||
2260685679f7Sakolb tp->t_cpu != tcp)
2261685679f7Sakolb break;
2262685679f7Sakolb
2263685679f7Sakolb /*
2264fb2f18f8Sesaxe 		 * Steal immediately if, due to CMT processor architecture,
2265fb2f18f8Sesaxe 		 * migration between cp and tcp would incur no performance
2266fb2f18f8Sesaxe 		 * penalty.
2267685679f7Sakolb */
2268fb2f18f8Sesaxe if (pg_cmt_can_migrate(cp, tcp))
2269685679f7Sakolb break;
2270685679f7Sakolb
2271fb2f18f8Sesaxe nosteal = nosteal_nsec;
2272fb2f18f8Sesaxe if (nosteal == 0)
2273685679f7Sakolb break;
2274685679f7Sakolb
2275685679f7Sakolb /*
2276685679f7Sakolb * Calculate time spent sitting on run queue
2277685679f7Sakolb */
2278685679f7Sakolb now = gethrtime_unscaled();
2279685679f7Sakolb rqtime = now - tp->t_waitrq;
2280685679f7Sakolb scalehrtime(&rqtime);
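		/*
		 * Editor's note: t_waitrq holds a gethrtime_unscaled()
		 * timestamp taken when the thread was enqueued, so the
		 * delta above is in unscaled ticks; scalehrtime()
		 * converts it in place to nanoseconds so it can be
		 * compared with nosteal_nsec below.
		 */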
2281685679f7Sakolb
2282685679f7Sakolb /*
2283685679f7Sakolb * Steal immediately if the time spent on this run queue is more
2284685679f7Sakolb 		 * than the allowed nosteal delay.
2285685679f7Sakolb *
2286685679f7Sakolb * Negative rqtime check is needed here to avoid infinite
2287685679f7Sakolb * stealing delays caused by unlikely but not impossible
2288685679f7Sakolb * drifts between CPU times on different CPUs.
2289685679f7Sakolb */
2290685679f7Sakolb if (rqtime > nosteal || rqtime < 0)
2291685679f7Sakolb break;
2292685679f7Sakolb
2293685679f7Sakolb DTRACE_PROBE4(nosteal, kthread_t *, tp,
2294685679f7Sakolb cpu_t *, tcp, cpu_t *, cp, hrtime_t, rqtime);
2295685679f7Sakolb scalehrtime(&now);
2296685679f7Sakolb /*
2297685679f7Sakolb * Calculate when this thread becomes stealable
2298685679f7Sakolb */
2299685679f7Sakolb now += (nosteal - rqtime);
2300685679f7Sakolb
2301685679f7Sakolb /*
2302685679f7Sakolb 		 * Record the earliest time at which any thread on this queue becomes stealable
2303685679f7Sakolb */
2304685679f7Sakolb if (now < dp->disp_steal)
2305685679f7Sakolb dp->disp_steal = now;
23067c478bd9Sstevel@tonic-gate }
23077c478bd9Sstevel@tonic-gate
23087c478bd9Sstevel@tonic-gate /*
23097c478bd9Sstevel@tonic-gate 	 * If there were no unbound threads on this queue, recalculate
2310685679f7Sakolb 	 * this queue's maximum unbound priority before returning. The
2311685679f7Sakolb 	 * value of disp_max_unbound_pri is not always accurate because
2312685679f7Sakolb 	 * it isn't reduced until another idle CPU looks for work.
2313685679f7Sakolb */
2314685679f7Sakolb if (allbound)
2315685679f7Sakolb disp_fix_unbound_pri(dp, pri);
2316685679f7Sakolb
2317685679f7Sakolb /*
2318685679f7Sakolb * If we reached the end of the queue and found no unbound threads
2319685679f7Sakolb * then return NULL so that other CPUs will be considered. If there
2320685679f7Sakolb * are unbound threads but they cannot yet be stolen, then
2321685679f7Sakolb * return T_DONTSTEAL and try again later.
23227c478bd9Sstevel@tonic-gate */
23237c478bd9Sstevel@tonic-gate if (tp == NULL) {
23247c478bd9Sstevel@tonic-gate disp_lock_exit_nopreempt(&dp->disp_lock);
2325685679f7Sakolb return (allbound ? NULL : T_DONTSTEAL);
23267c478bd9Sstevel@tonic-gate }
23277c478bd9Sstevel@tonic-gate
23287c478bd9Sstevel@tonic-gate /*
23297c478bd9Sstevel@tonic-gate * Found a runnable, unbound thread, so remove it from queue.
23307c478bd9Sstevel@tonic-gate * dispdeq() requires that we have the thread locked, and we do,
23317c478bd9Sstevel@tonic-gate * by virtue of holding the dispatch queue lock. dispdeq() will
23327c478bd9Sstevel@tonic-gate * put the thread in transition state, thereby dropping the dispq
23337c478bd9Sstevel@tonic-gate * lock.
23347c478bd9Sstevel@tonic-gate */
2335685679f7Sakolb
23367c478bd9Sstevel@tonic-gate #ifdef DEBUG
23377c478bd9Sstevel@tonic-gate {
23387c478bd9Sstevel@tonic-gate int thread_was_on_queue;
23397c478bd9Sstevel@tonic-gate
23407c478bd9Sstevel@tonic-gate thread_was_on_queue = dispdeq(tp); /* drops disp_lock */
23417c478bd9Sstevel@tonic-gate ASSERT(thread_was_on_queue);
23427c478bd9Sstevel@tonic-gate }
2343685679f7Sakolb
23447c478bd9Sstevel@tonic-gate #else /* DEBUG */
23457c478bd9Sstevel@tonic-gate (void) dispdeq(tp); /* drops disp_lock */
23467c478bd9Sstevel@tonic-gate #endif /* DEBUG */
23477c478bd9Sstevel@tonic-gate
2348685679f7Sakolb /*
2349685679f7Sakolb 	 * Reset the disp_queue steal time - we do not know what the
2350685679f7Sakolb 	 * smallest value across the queue is.
2351685679f7Sakolb */
2352685679f7Sakolb dp->disp_steal = 0;
2353685679f7Sakolb
23547c478bd9Sstevel@tonic-gate tp->t_schedflag |= TS_DONT_SWAP;
23557c478bd9Sstevel@tonic-gate
23567c478bd9Sstevel@tonic-gate /*
23577c478bd9Sstevel@tonic-gate 	 * Set up the thread to run on the current CPU.
23587c478bd9Sstevel@tonic-gate */
23597c478bd9Sstevel@tonic-gate tp->t_disp_queue = cp->cpu_disp;
23607c478bd9Sstevel@tonic-gate
23617c478bd9Sstevel@tonic-gate cp->cpu_dispthread = tp; /* protected by spl only */
23627c478bd9Sstevel@tonic-gate cp->cpu_dispatch_pri = pri;
23630f500aa6Sbpramod
23640f500aa6Sbpramod /*
23650f500aa6Sbpramod * There can be a memory synchronization race between disp_getbest()
23660f500aa6Sbpramod * and disp_ratify() vs cpu_resched() where cpu_resched() is trying
23670f500aa6Sbpramod * to preempt the current thread to run the enqueued thread while
23680f500aa6Sbpramod * disp_getbest() and disp_ratify() are changing the current thread
23690f500aa6Sbpramod * to the stolen thread. This may lead to a situation where
23700f500aa6Sbpramod * cpu_resched() tries to preempt the wrong thread and the
23710f500aa6Sbpramod * stolen thread continues to run on the CPU which has been tagged
23720f500aa6Sbpramod * for preemption.
23730f500aa6Sbpramod * Later the clock thread gets enqueued but doesn't get to run on the
23740f500aa6Sbpramod 	 * CPU, causing the system to hang.
23750f500aa6Sbpramod *
23760f500aa6Sbpramod * To avoid this, grabbing and dropping the disp_lock (which does
23770f500aa6Sbpramod * a memory barrier) is needed to synchronize the execution of
23780f500aa6Sbpramod * cpu_resched() with disp_getbest() and disp_ratify() and
23790f500aa6Sbpramod * synchronize the memory read and written by cpu_resched(),
23800f500aa6Sbpramod * disp_getbest(), and disp_ratify() with each other.
23810f500aa6Sbpramod * (see CR#6482861 for more details).
23820f500aa6Sbpramod */
23830f500aa6Sbpramod disp_lock_enter_high(&cp->cpu_disp->disp_lock);
23840f500aa6Sbpramod disp_lock_exit_high(&cp->cpu_disp->disp_lock);
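	/*
	 * Editor's note: the back-to-back enter/exit above protects no
	 * data; the lock round-trip is used purely for its memory
	 * barrier effect, as the preceding comment describes.
	 */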
23850f500aa6Sbpramod
23867c478bd9Sstevel@tonic-gate ASSERT(pri == DISP_PRIO(tp));
23877c478bd9Sstevel@tonic-gate
2388685679f7Sakolb DTRACE_PROBE3(steal, kthread_t *, tp, cpu_t *, tcp, cpu_t *, cp);
2389685679f7Sakolb
23907c478bd9Sstevel@tonic-gate thread_onproc(tp, cp); /* set t_state to TS_ONPROC */
23917c478bd9Sstevel@tonic-gate
23927c478bd9Sstevel@tonic-gate /*
23937c478bd9Sstevel@tonic-gate * Return with spl high so that swtch() won't need to raise it.
23947c478bd9Sstevel@tonic-gate * The disp_lock was dropped by dispdeq().
23957c478bd9Sstevel@tonic-gate */
23967c478bd9Sstevel@tonic-gate
23977c478bd9Sstevel@tonic-gate return (tp);
23987c478bd9Sstevel@tonic-gate }
23997c478bd9Sstevel@tonic-gate
24007c478bd9Sstevel@tonic-gate /*
24017c478bd9Sstevel@tonic-gate * disp_bound_common() - common routine for higher level functions
24027c478bd9Sstevel@tonic-gate * that check for bound threads under certain conditions.
24037c478bd9Sstevel@tonic-gate * If 'threadlistsafe' is set then there is no need to acquire
24047c478bd9Sstevel@tonic-gate  *	pidlock to stop the thread list from changing (e.g., if
24057c478bd9Sstevel@tonic-gate * disp_bound_* is called with cpus paused).
24067c478bd9Sstevel@tonic-gate */
24077c478bd9Sstevel@tonic-gate static int
24087c478bd9Sstevel@tonic-gate disp_bound_common(cpu_t *cp, int threadlistsafe, int flag)
24097c478bd9Sstevel@tonic-gate {
24107c478bd9Sstevel@tonic-gate int found = 0;
24117c478bd9Sstevel@tonic-gate kthread_t *tp;
24127c478bd9Sstevel@tonic-gate
24137c478bd9Sstevel@tonic-gate ASSERT(flag);
24147c478bd9Sstevel@tonic-gate
24157c478bd9Sstevel@tonic-gate if (!threadlistsafe)
24167c478bd9Sstevel@tonic-gate mutex_enter(&pidlock);
24177c478bd9Sstevel@tonic-gate tp = curthread; /* faster than allthreads */
24187c478bd9Sstevel@tonic-gate do {
24197c478bd9Sstevel@tonic-gate if (tp->t_state != TS_FREE) {
24207c478bd9Sstevel@tonic-gate /*
24217c478bd9Sstevel@tonic-gate * If an interrupt thread is busy, but the
24227c478bd9Sstevel@tonic-gate * caller doesn't care (i.e. BOUND_INTR is off),
24237c478bd9Sstevel@tonic-gate * then just ignore it and continue through.
24247c478bd9Sstevel@tonic-gate */
24257c478bd9Sstevel@tonic-gate if ((tp->t_flag & T_INTR_THREAD) &&
24267c478bd9Sstevel@tonic-gate !(flag & BOUND_INTR))
24277c478bd9Sstevel@tonic-gate continue;
24287c478bd9Sstevel@tonic-gate
24297c478bd9Sstevel@tonic-gate /*
24307c478bd9Sstevel@tonic-gate * Skip the idle thread for the CPU
24317c478bd9Sstevel@tonic-gate * we're about to set offline.
24327c478bd9Sstevel@tonic-gate */
24337c478bd9Sstevel@tonic-gate if (tp == cp->cpu_idle_thread)
24347c478bd9Sstevel@tonic-gate continue;
24357c478bd9Sstevel@tonic-gate
24367c478bd9Sstevel@tonic-gate /*
24377c478bd9Sstevel@tonic-gate * Skip the pause thread for the CPU
24387c478bd9Sstevel@tonic-gate * we're about to set offline.
24397c478bd9Sstevel@tonic-gate */
24407c478bd9Sstevel@tonic-gate if (tp == cp->cpu_pause_thread)
24417c478bd9Sstevel@tonic-gate continue;
24427c478bd9Sstevel@tonic-gate
24437c478bd9Sstevel@tonic-gate if ((flag & BOUND_CPU) &&
24447c478bd9Sstevel@tonic-gate (tp->t_bound_cpu == cp ||
24457c478bd9Sstevel@tonic-gate tp->t_bind_cpu == cp->cpu_id ||
24467c478bd9Sstevel@tonic-gate tp->t_weakbound_cpu == cp)) {
24477c478bd9Sstevel@tonic-gate found = 1;
24487c478bd9Sstevel@tonic-gate break;
24497c478bd9Sstevel@tonic-gate }
24507c478bd9Sstevel@tonic-gate
24517c478bd9Sstevel@tonic-gate if ((flag & BOUND_PARTITION) &&
24527c478bd9Sstevel@tonic-gate (tp->t_cpupart == cp->cpu_part)) {
24537c478bd9Sstevel@tonic-gate found = 1;
24547c478bd9Sstevel@tonic-gate break;
24557c478bd9Sstevel@tonic-gate }
24567c478bd9Sstevel@tonic-gate }
24577c478bd9Sstevel@tonic-gate } while ((tp = tp->t_next) != curthread && found == 0);
24587c478bd9Sstevel@tonic-gate if (!threadlistsafe)
24597c478bd9Sstevel@tonic-gate mutex_exit(&pidlock);
24607c478bd9Sstevel@tonic-gate return (found);
24617c478bd9Sstevel@tonic-gate }
24627c478bd9Sstevel@tonic-gate
24637c478bd9Sstevel@tonic-gate /*
24647c478bd9Sstevel@tonic-gate * disp_bound_threads - return nonzero if threads are bound to the processor.
24657c478bd9Sstevel@tonic-gate * Called infrequently. Keep this simple.
24667c478bd9Sstevel@tonic-gate * Includes threads that are asleep or stopped but not onproc.
24677c478bd9Sstevel@tonic-gate */
24687c478bd9Sstevel@tonic-gate int
24697c478bd9Sstevel@tonic-gate disp_bound_threads(cpu_t *cp, int threadlistsafe)
24707c478bd9Sstevel@tonic-gate {
24717c478bd9Sstevel@tonic-gate return (disp_bound_common(cp, threadlistsafe, BOUND_CPU));
24727c478bd9Sstevel@tonic-gate }
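
/*
 * Editor's sketch of a typical use (assumed, for illustration only):
 * a caller taking a CPU offline might refuse while bound threads
 * remain, along the lines of:
 *
 *	if (disp_bound_threads(cp, 0))
 *		return (EBUSY);
 *
 * The exact caller logic lives elsewhere (e.g. the cpu_offline path).
 */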
24737c478bd9Sstevel@tonic-gate
24747c478bd9Sstevel@tonic-gate /*
24757c478bd9Sstevel@tonic-gate * disp_bound_anythreads - return nonzero if _any_ threads are bound
24767c478bd9Sstevel@tonic-gate * to the given processor, including interrupt threads.
24777c478bd9Sstevel@tonic-gate */
24787c478bd9Sstevel@tonic-gate int
24797c478bd9Sstevel@tonic-gate disp_bound_anythreads(cpu_t *cp, int threadlistsafe)
24807c478bd9Sstevel@tonic-gate {
24817c478bd9Sstevel@tonic-gate return (disp_bound_common(cp, threadlistsafe, BOUND_CPU | BOUND_INTR));
24827c478bd9Sstevel@tonic-gate }
24837c478bd9Sstevel@tonic-gate
24847c478bd9Sstevel@tonic-gate /*
24857c478bd9Sstevel@tonic-gate * disp_bound_partition - return nonzero if threads are bound to the same
24867c478bd9Sstevel@tonic-gate * partition as the processor.
24877c478bd9Sstevel@tonic-gate * Called infrequently. Keep this simple.
24887c478bd9Sstevel@tonic-gate * Includes threads that are asleep or stopped but not onproc.
24897c478bd9Sstevel@tonic-gate */
24907c478bd9Sstevel@tonic-gate int
24917c478bd9Sstevel@tonic-gate disp_bound_partition(cpu_t *cp, int threadlistsafe)
24927c478bd9Sstevel@tonic-gate {
24937c478bd9Sstevel@tonic-gate return (disp_bound_common(cp, threadlistsafe, BOUND_PARTITION));
24947c478bd9Sstevel@tonic-gate }
24957c478bd9Sstevel@tonic-gate
24967c478bd9Sstevel@tonic-gate /*
24977c478bd9Sstevel@tonic-gate * disp_cpu_inactive - make a CPU inactive by moving all of its unbound
24987c478bd9Sstevel@tonic-gate * threads to other CPUs.
24997c478bd9Sstevel@tonic-gate */
25007c478bd9Sstevel@tonic-gate void
25017c478bd9Sstevel@tonic-gate disp_cpu_inactive(cpu_t *cp)
25027c478bd9Sstevel@tonic-gate {
25037c478bd9Sstevel@tonic-gate kthread_t *tp;
25047c478bd9Sstevel@tonic-gate disp_t *dp = cp->cpu_disp;
25057c478bd9Sstevel@tonic-gate dispq_t *dq;
25067c478bd9Sstevel@tonic-gate pri_t pri;
25077c478bd9Sstevel@tonic-gate int wasonq;
25087c478bd9Sstevel@tonic-gate
25097c478bd9Sstevel@tonic-gate disp_lock_enter(&dp->disp_lock);
25107c478bd9Sstevel@tonic-gate while ((pri = dp->disp_max_unbound_pri) != -1) {
25117c478bd9Sstevel@tonic-gate dq = &dp->disp_q[pri];
25127c478bd9Sstevel@tonic-gate tp = dq->dq_first;
25137c478bd9Sstevel@tonic-gate
25147c478bd9Sstevel@tonic-gate /*
25157c478bd9Sstevel@tonic-gate * Skip over bound threads.
25167c478bd9Sstevel@tonic-gate */
25177c478bd9Sstevel@tonic-gate while (tp != NULL && tp->t_bound_cpu != NULL) {
25187c478bd9Sstevel@tonic-gate tp = tp->t_link;
25197c478bd9Sstevel@tonic-gate }
25207c478bd9Sstevel@tonic-gate
25217c478bd9Sstevel@tonic-gate if (tp == NULL) {
25227c478bd9Sstevel@tonic-gate /* disp_max_unbound_pri must be inaccurate, so fix it */
25237c478bd9Sstevel@tonic-gate disp_fix_unbound_pri(dp, pri);
25247c478bd9Sstevel@tonic-gate continue;
25257c478bd9Sstevel@tonic-gate }
25267c478bd9Sstevel@tonic-gate
25277c478bd9Sstevel@tonic-gate wasonq = dispdeq(tp); /* drops disp_lock */
25287c478bd9Sstevel@tonic-gate ASSERT(wasonq);
25297c478bd9Sstevel@tonic-gate ASSERT(tp->t_weakbound_cpu == NULL);
25307c478bd9Sstevel@tonic-gate
25317c478bd9Sstevel@tonic-gate setbackdq(tp);
25327c478bd9Sstevel@tonic-gate /*
25337c478bd9Sstevel@tonic-gate * Called from cpu_offline:
25347c478bd9Sstevel@tonic-gate *
25357c478bd9Sstevel@tonic-gate * cp has already been removed from the list of active cpus
25367c478bd9Sstevel@tonic-gate * and tp->t_cpu has been changed so there is no risk of
25377c478bd9Sstevel@tonic-gate * tp ending up back on cp.
25387c478bd9Sstevel@tonic-gate *
25397c478bd9Sstevel@tonic-gate * Called from cpupart_move_cpu:
25407c478bd9Sstevel@tonic-gate *
25417c478bd9Sstevel@tonic-gate * The cpu has moved to a new cpupart. Any threads that
25427c478bd9Sstevel@tonic-gate 		 * were on its dispatch queues before the move remain
25437c478bd9Sstevel@tonic-gate * in the old partition and can't run in the new partition.
25447c478bd9Sstevel@tonic-gate */
25457c478bd9Sstevel@tonic-gate ASSERT(tp->t_cpu != cp);
25467c478bd9Sstevel@tonic-gate thread_unlock(tp);
25477c478bd9Sstevel@tonic-gate
25487c478bd9Sstevel@tonic-gate disp_lock_enter(&dp->disp_lock);
25497c478bd9Sstevel@tonic-gate }
25507c478bd9Sstevel@tonic-gate disp_lock_exit(&dp->disp_lock);
25517c478bd9Sstevel@tonic-gate }
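
/*
 * Editor's note: each pass of the loop above drops disp_lock (inside
 * dispdeq()) and re-acquires it before re-reading disp_max_unbound_pri,
 * so the loop terminates once every remaining runnable thread on cp's
 * queue is bound.
 */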
25527c478bd9Sstevel@tonic-gate
25537c478bd9Sstevel@tonic-gate /*
2554455e370cSJohn Levon * Return a score rating this CPU for running this thread: lower is better.
25557c478bd9Sstevel@tonic-gate *
2556455e370cSJohn Levon * If curthread is looking for a new CPU, then we ignore cpu_dispatch_pri for
2557455e370cSJohn Levon * curcpu (as that's our own priority).
25587c478bd9Sstevel@tonic-gate *
2559455e370cSJohn Levon * If a cpu is the target of an offline request, then try to avoid it.
25607c478bd9Sstevel@tonic-gate *
2561455e370cSJohn Levon * Otherwise we'll use double the effective dispatcher priority for the CPU.
25627c478bd9Sstevel@tonic-gate *
2563c3377ee9SJohn Levon * We do this so smt_adjust_cpu_score() can increment the score if needed,
2564455e370cSJohn Levon  * without ending up overriding a dispatcher priority.
2565455e370cSJohn Levon */
2566455e370cSJohn Levon static pri_t
2567455e370cSJohn Levon cpu_score(cpu_t *cp, kthread_t *tp)
2568455e370cSJohn Levon {
2569455e370cSJohn Levon pri_t score;
2570455e370cSJohn Levon
2571455e370cSJohn Levon if (tp == curthread && cp == curthread->t_cpu)
2572455e370cSJohn Levon score = 2 * CPU_IDLE_PRI;
2573455e370cSJohn Levon else if (cp == cpu_inmotion)
2574455e370cSJohn Levon score = SHRT_MAX;
2575455e370cSJohn Levon else
2576455e370cSJohn Levon score = 2 * cp->cpu_dispatch_pri;
2577455e370cSJohn Levon
2578455e370cSJohn Levon if (2 * cp->cpu_disp->disp_maxrunpri > score)
2579455e370cSJohn Levon score = 2 * cp->cpu_disp->disp_maxrunpri;
2580455e370cSJohn Levon if (2 * cp->cpu_chosen_level > score)
2581455e370cSJohn Levon score = 2 * cp->cpu_chosen_level;
2582455e370cSJohn Levon
2583c3377ee9SJohn Levon return (smt_adjust_cpu_score(tp, cp, score));
2584455e370cSJohn Levon }
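
/*
 * Editor's worked example (illustrative only): for two candidate CPUs
 * with effective dispatch priorities 3 and 4, the doubled scores are 6
 * and 8. A +1 SMT penalty applied by smt_adjust_cpu_score() turns a
 * would-be tie between two priority-3 CPUs into 6 vs. 7, while 7 still
 * beats 8, so the penalty can break ties without ever reordering CPUs
 * whose dispatch priorities actually differ.
 */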
2585455e370cSJohn Levon
2586455e370cSJohn Levon /*
2587455e370cSJohn Levon * disp_lowpri_cpu - find a suitable CPU to run the given thread.
25887c478bd9Sstevel@tonic-gate *
2589455e370cSJohn Levon * We are looking for a CPU with an effective dispatch priority lower than the
2590455e370cSJohn Levon * thread's, so that the thread will run immediately rather than be enqueued.
2591455e370cSJohn Levon * For NUMA locality, we prefer "home" CPUs within the thread's ->t_lpl group.
2592455e370cSJohn Levon * If we don't find an available CPU there, we will expand our search to include
2593455e370cSJohn Levon * wider locality levels. (Note these groups are already divided by CPU
2594455e370cSJohn Levon * partition.)
2595455e370cSJohn Levon *
2596455e370cSJohn Levon * If the thread cannot immediately run on *any* CPU, we'll enqueue ourselves on
2597455e370cSJohn Levon * the best home CPU we found.
2598455e370cSJohn Levon *
2599455e370cSJohn Levon * The hint passed in is used as a starting point so we don't favor CPU 0 or any
2600455e370cSJohn Levon * other CPU. The caller should pass in the most recently used CPU for the
2601455e370cSJohn Levon * thread; it's of course possible that this CPU isn't in the home lgroup.
2602455e370cSJohn Levon *
2603455e370cSJohn Levon * This function must be called at either high SPL, or with preemption disabled,
2604455e370cSJohn Levon * so that the "hint" CPU cannot be removed from the online CPU list while we
2605455e370cSJohn Levon * are traversing it.
26067c478bd9Sstevel@tonic-gate */
26077c478bd9Sstevel@tonic-gate cpu_t *
2608455e370cSJohn Levon disp_lowpri_cpu(cpu_t *hint, kthread_t *tp, pri_t tpri)
26097c478bd9Sstevel@tonic-gate {
26107c478bd9Sstevel@tonic-gate cpu_t *bestcpu;
26117c478bd9Sstevel@tonic-gate cpu_t *besthomecpu;
26127c478bd9Sstevel@tonic-gate cpu_t *cp, *cpstart;
26137c478bd9Sstevel@tonic-gate
26147c478bd9Sstevel@tonic-gate klgrpset_t done;
26157c478bd9Sstevel@tonic-gate
26167c478bd9Sstevel@tonic-gate lpl_t *lpl_iter, *lpl_leaf;
26177c478bd9Sstevel@tonic-gate
26187c478bd9Sstevel@tonic-gate ASSERT(hint != NULL);
2619455e370cSJohn Levon ASSERT(tp->t_lpl->lpl_ncpu > 0);
26207c478bd9Sstevel@tonic-gate
26217c478bd9Sstevel@tonic-gate bestcpu = besthomecpu = NULL;
26227c478bd9Sstevel@tonic-gate klgrpset_clear(done);
26237c478bd9Sstevel@tonic-gate
2624455e370cSJohn Levon lpl_iter = tp->t_lpl;
26257c478bd9Sstevel@tonic-gate
26267c478bd9Sstevel@tonic-gate do {
2627455e370cSJohn Levon pri_t best = SHRT_MAX;
2628455e370cSJohn Levon klgrpset_t cur_set;
26297c478bd9Sstevel@tonic-gate
26307c478bd9Sstevel@tonic-gate klgrpset_clear(cur_set);
26317c478bd9Sstevel@tonic-gate
2632455e370cSJohn Levon for (int i = 0; i < lpl_iter->lpl_nrset; i++) {
26337c478bd9Sstevel@tonic-gate lpl_leaf = lpl_iter->lpl_rset[i];
26347c478bd9Sstevel@tonic-gate if (klgrpset_ismember(done, lpl_leaf->lpl_lgrpid))
26357c478bd9Sstevel@tonic-gate continue;
26367c478bd9Sstevel@tonic-gate
26377c478bd9Sstevel@tonic-gate klgrpset_add(cur_set, lpl_leaf->lpl_lgrpid);
26387c478bd9Sstevel@tonic-gate
26397c478bd9Sstevel@tonic-gate if (hint->cpu_lpl == lpl_leaf)
26407c478bd9Sstevel@tonic-gate cp = cpstart = hint;
26417c478bd9Sstevel@tonic-gate else
26427c478bd9Sstevel@tonic-gate cp = cpstart = lpl_leaf->lpl_cpus;
26437c478bd9Sstevel@tonic-gate
26447c478bd9Sstevel@tonic-gate do {
2645455e370cSJohn Levon pri_t score = cpu_score(cp, tp);
2646455e370cSJohn Levon
2647455e370cSJohn Levon if (score < best) {
2648455e370cSJohn Levon best = score;
26497c478bd9Sstevel@tonic-gate bestcpu = cp;
2650455e370cSJohn Levon
2651455e370cSJohn Levon /* An idle CPU: we're done. */
2652455e370cSJohn Levon if (score / 2 == CPU_IDLE_PRI)
2653455e370cSJohn Levon goto out;
26547c478bd9Sstevel@tonic-gate }
26557c478bd9Sstevel@tonic-gate } while ((cp = cp->cpu_next_lpl) != cpstart);
26567c478bd9Sstevel@tonic-gate }
26577c478bd9Sstevel@tonic-gate
2658455e370cSJohn Levon if (bestcpu != NULL && tpri > (best / 2))
2659455e370cSJohn Levon goto out;
2660455e370cSJohn Levon
26617c478bd9Sstevel@tonic-gate if (besthomecpu == NULL)
26627c478bd9Sstevel@tonic-gate besthomecpu = bestcpu;
2663455e370cSJohn Levon
26647c478bd9Sstevel@tonic-gate /*
26657c478bd9Sstevel@tonic-gate * Add the lgrps we just considered to the "done" set
26667c478bd9Sstevel@tonic-gate */
26677c478bd9Sstevel@tonic-gate klgrpset_or(done, cur_set);
26687c478bd9Sstevel@tonic-gate
26697c478bd9Sstevel@tonic-gate } while ((lpl_iter = lpl_iter->lpl_parent) != NULL);
26707c478bd9Sstevel@tonic-gate
26717c478bd9Sstevel@tonic-gate /*
26727c478bd9Sstevel@tonic-gate * The specified priority isn't high enough to run immediately
26737c478bd9Sstevel@tonic-gate * anywhere, so just return the best CPU from the home lgroup.
26747c478bd9Sstevel@tonic-gate */
2675455e370cSJohn Levon bestcpu = besthomecpu;
2676455e370cSJohn Levon
2677455e370cSJohn Levon out:
2678455e370cSJohn Levon ASSERT((bestcpu->cpu_flags & CPU_QUIESCED) == 0);
2679455e370cSJohn Levon return (bestcpu);
26807c478bd9Sstevel@tonic-gate }
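
/*
 * Editor's note on usage (assumed, for illustration): callers are
 * expected to pass the thread's most recently used CPU as the hint,
 * along the lines of:
 *
 *	cp = disp_lowpri_cpu(tp->t_cpu, tp, DISP_PRIO(tp));
 *
 * which biases the search toward warm caches while still honoring the
 * home-lgroup ordering described above.
 */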
26817c478bd9Sstevel@tonic-gate
26827c478bd9Sstevel@tonic-gate /*
26837c478bd9Sstevel@tonic-gate * This routine provides the generic idle cpu function for all processors.
26847c478bd9Sstevel@tonic-gate * If a processor has some specific code to execute when idle (say, to stop
26857c478bd9Sstevel@tonic-gate * the pipeline and save power) then that routine should be defined in the
26867c478bd9Sstevel@tonic-gate  * processor's specific code (module_xx.c) and the global variable idle_cpu
26877c478bd9Sstevel@tonic-gate * set to that function.
26887c478bd9Sstevel@tonic-gate */
26897c478bd9Sstevel@tonic-gate static void
26907c478bd9Sstevel@tonic-gate generic_idle_cpu(void)
26917c478bd9Sstevel@tonic-gate {
26927c478bd9Sstevel@tonic-gate }
26937c478bd9Sstevel@tonic-gate
26947c478bd9Sstevel@tonic-gate /*ARGSUSED*/
26957c478bd9Sstevel@tonic-gate static void
26967c478bd9Sstevel@tonic-gate generic_enq_thread(cpu_t *cpu, int bound)
26977c478bd9Sstevel@tonic-gate {
26987c478bd9Sstevel@tonic-gate }
2699455e370cSJohn Levon
2700455e370cSJohn Levon cpu_t *
2701455e370cSJohn Levon disp_choose_best_cpu(void)
2702455e370cSJohn Levon {
2703455e370cSJohn Levon kthread_t *t = curthread;
2704455e370cSJohn Levon cpu_t *curcpu = CPU;
2705455e370cSJohn Levon
2706455e370cSJohn Levon ASSERT(t->t_preempt > 0);
2707455e370cSJohn Levon ASSERT(t->t_state == TS_ONPROC);
2708455e370cSJohn Levon ASSERT(t->t_schedflag & TS_VCPU);
2709455e370cSJohn Levon
2710c3377ee9SJohn Levon if (smt_should_run(t, curcpu))
2711455e370cSJohn Levon return (curcpu);
2712455e370cSJohn Levon
2713455e370cSJohn Levon return (disp_lowpri_cpu(curcpu, t, t->t_pri));
2714455e370cSJohn Levon }
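
/*
 * Editor's note: as the asserts above indicate, the caller here is an
 * ONPROC thread flagged TS_VCPU running with preemption disabled; it
 * either stays on the current CPU (when smt_should_run() approves) or
 * asks disp_lowpri_cpu() for a better-placed CPU, using itself as the
 * hint.
 */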