/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/sbuf.h>
#include <sys/cpu_topology.h>

#include <machine/smp.h>

#ifndef NAPICID
#define NAPICID 256
#endif

#define INDENT_BUF_SIZE	(LEVEL_NO * 3)
#define INVALID_ID	(-1)

/* Per-cpu sysctl nodes and info */
struct per_cpu_sysctl_info {
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	char cpu_name[32];
	int physical_id;
	int core_id;
	char physical_siblings[8*MAXCPU];
	char core_siblings[8*MAXCPU];
};
typedef struct per_cpu_sysctl_info per_cpu_sysctl_info_t;

static cpu_node_t cpu_topology_nodes[MAXCPU];	/* Memory for topology */
static cpu_node_t *cpu_root_node;		/* Root node pointer */

static struct sysctl_ctx_list cpu_topology_sysctl_ctx;
static struct sysctl_oid *cpu_topology_sysctl_tree;
static char cpu_topology_members[8*MAXCPU];
static per_cpu_sysctl_info_t *pcpu_sysctl;
static void sbuf_print_cpuset(struct sbuf *sb, cpumask_t *mask);

int cpu_topology_levels_number = 1;
int cpu_topology_core_ids;
int cpu_topology_phys_ids;
cpu_node_t *root_cpu_node;

MALLOC_DEFINE(M_PCPUSYS, "pcpusys", "pcpu sysctl topology");

SYSCTL_INT(_hw, OID_AUTO, cpu_topology_core_ids, CTLFLAG_RW,
	   &cpu_topology_core_ids, 0, "# of real cores per package");
SYSCTL_INT(_hw, OID_AUTO, cpu_topology_phys_ids, CTLFLAG_RW,
	   &cpu_topology_phys_ids, 0, "# of physical packages");

/* Get the next valid APIC ID starting
 * from the current one (curr_apicid).
 */
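/*
 * Note: the search begins at curr_apicid + 1; the topology build starts
 * with apicid = -1, so the first call returns the lowest valid APIC ID.
 */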
static int
get_next_valid_apicid(int curr_apicid)
{
	int next_apicid = curr_apicid;
	do {
		next_apicid++;
	}
	while (get_cpuid_from_apicid(next_apicid) == -1 &&
	   next_apicid < NAPICID);
	if (next_apicid == NAPICID) {
		kprintf("Warning: No next valid APICID found. Returning -1\n");
		return -1;
	}
	return next_apicid;
}

/* Build a generic topology tree. The parameters have the following meaning:
 * - children_no_per_level : the number of children on each level
 * - level_types : the type of each level (THREAD, CORE, CHIP, etc)
 * - cur_level : the current level of the tree
 * - node : the current node
 * - last_free_node : the last free node in the global array
 * - apicid : cursor for the APIC IDs from which the leaf cpuids are derived
 */
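/*
 * For example (illustrative figures only): a machine with 2 chips,
 * 4 cores per chip and 2 threads per core would be described by
 *
 *	children_no_per_level[] = { 2, 4, 2, 0 };
 *	level_types[] = { PACKAGE_LEVEL, CHIP_LEVEL, CORE_LEVEL, THREAD_LEVEL };
 *
 * producing a 4-level tree whose 16 leaves are the hardware threads.
 */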
static void
build_topology_tree(int *children_no_per_level,
   uint8_t *level_types,
   int cur_level,
   cpu_node_t *node,
   cpu_node_t **last_free_node,
   int *apicid)
{
	int i;

	node->child_no = children_no_per_level[cur_level];
	node->type = level_types[cur_level];
	CPUMASK_ASSZERO(node->members);
	node->compute_unit_id = -1;

	if (node->child_no == 0) {
		*apicid = get_next_valid_apicid(*apicid);
		CPUMASK_ASSBIT(node->members, get_cpuid_from_apicid(*apicid));
		return;
	}

	if (node->parent_node == NULL)
		root_cpu_node = node;

	for (i = 0; i < node->child_no; i++) {
		node->child_node[i] = *last_free_node;
		(*last_free_node)++;

		node->child_node[i]->parent_node = node;

		build_topology_tree(children_no_per_level,
		    level_types,
		    cur_level + 1,
		    node->child_node[i],
		    last_free_node,
		    apicid);

		CPUMASK_ORMASK(node->members, node->child_node[i]->members);
	}
}

#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
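/*
 * Remove the element at index pos from the n-entry array a, shifting the
 * remaining elements left and clearing the vacated last slot.
 */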
static void
migrate_elements(cpu_node_t **a, int n, int pos)
{
	int i;

	for (i = pos; i < n - 1; i++) {
		a[i] = a[i+1];
	}
	a[i] = NULL;
}
#endif

/* Build the CPU topology. Detection is done by comparing the
 * chip, core and logical IDs of each CPU with the IDs of the
 * BSP. When a match is found, the CPUs are siblings at that level.
 */
static void
build_cpu_topology(void)
{
	detect_cpu_topology();
	int i;
	int BSPID = 0;
	int threads_per_core = 0;
	int cores_per_chip = 0;
	int chips_per_package = 0;
	int children_no_per_level[LEVEL_NO];
	uint8_t level_types[LEVEL_NO];
	int apicid = -1;

	cpu_node_t *root = &cpu_topology_nodes[0];
	cpu_node_t *last_free_node = root + 1;

	/* Assume that the topology is uniform.
	 * Find the number of siblings within the chip
	 * and within the core to build up the topology.
	 */
	for (i = 0; i < ncpus; i++) {
		cpumask_t mask;

		CPUMASK_ASSBIT(mask, i);

		if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
			continue;

		if (get_chip_ID(BSPID) == get_chip_ID(i))
			cores_per_chip++;
		else
			continue;

		if (get_core_number_within_chip(BSPID) ==
		    get_core_number_within_chip(i))
			threads_per_core++;
	}

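	/*
	 * The loop above counted every active logical CPU on the BSP's chip
	 * in cores_per_chip; dividing by threads_per_core leaves the number
	 * of real cores, and chips_per_package then follows from ncpus.
	 */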
	cores_per_chip /= threads_per_core;
	chips_per_package = ncpus / (cores_per_chip * threads_per_core);

	if (bootverbose)
		kprintf("CPU Topology: cores_per_chip: %d; threads_per_core: %d; chips_per_package: %d;\n",
		    cores_per_chip, threads_per_core, chips_per_package);

	if (threads_per_core > 1) { /* HT available - 4 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = cores_per_chip;
		children_no_per_level[2] = threads_per_core;
		children_no_per_level[3] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;
		level_types[2] = CORE_LEVEL;
		level_types[3] = THREAD_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 4;

	} else if (cores_per_chip > 1) { /* No HT available - 3 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = cores_per_chip;
		children_no_per_level[2] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;
		level_types[2] = CORE_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 3;

	} else { /* No HT and no Multi-Core - 2 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 2;

	}

	cpu_root_node = root;


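	/*
	 * AMD compute-unit fixup: if fix_amd_topology() succeeds (returns 0),
	 * cores that share a compute unit are regrouped below under a new
	 * CORE_LEVEL node and the original leaves are demoted to THREAD_LEVEL,
	 * so compute-unit siblings are treated like SMT siblings.
	 */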
#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
	if (fix_amd_topology() == 0) {
		int visited[MAXCPU], i, j, pos, cpuid;
		cpu_node_t *leaf, *parent;

		bzero(visited, MAXCPU * sizeof(int));

		for (i = 0; i < ncpus; i++) {
			if (visited[i] == 0) {
				pos = 0;
				visited[i] = 1;
				leaf = get_cpu_node_by_cpuid(i);

				if (leaf->type == CORE_LEVEL) {
					parent = leaf->parent_node;

					last_free_node->child_node[0] = leaf;
					last_free_node->child_no = 1;
					last_free_node->members = leaf->members;
					last_free_node->compute_unit_id = leaf->compute_unit_id;
					last_free_node->parent_node = parent;
					last_free_node->type = CORE_LEVEL;


					for (j = 0; j < parent->child_no; j++) {
						if (parent->child_node[j] != leaf) {

							cpuid = BSFCPUMASK(parent->child_node[j]->members);
							if (visited[cpuid] == 0 &&
							    parent->child_node[j]->compute_unit_id == leaf->compute_unit_id) {

								last_free_node->child_node[last_free_node->child_no] = parent->child_node[j];
								last_free_node->child_no++;
								CPUMASK_ORMASK(last_free_node->members, parent->child_node[j]->members);

								parent->child_node[j]->type = THREAD_LEVEL;
								parent->child_node[j]->parent_node = last_free_node;
								visited[cpuid] = 1;

								migrate_elements(parent->child_node, parent->child_no, j);
								parent->child_no--;
								j--;
							}
						} else {
							pos = j;
						}
					}
					if (last_free_node->child_no > 1) {
						parent->child_node[pos] = last_free_node;
						leaf->type = THREAD_LEVEL;
						leaf->parent_node = last_free_node;
						last_free_node++;
					}
				}
			}
		}
	}
#endif
}

/* Recursive helper to print the CPU topology tree */
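/*
 * buf accumulates the ASCII indentation for the current branch: each level
 * appends either "| " (more siblings follow) or "  " (last child), and the
 * first buf_len characters are emitted in front of every node's line.
 */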
static void
print_cpu_topology_tree_sysctl_helper(cpu_node_t *node,
    struct sbuf *sb,
    char * buf,
    int buf_len,
    int last)
{
	int i;
	int bsr_member;

	sbuf_bcat(sb, buf, buf_len);
	if (last) {
		sbuf_printf(sb, "\\-");
		buf[buf_len] = ' '; buf_len++;
		buf[buf_len] = ' '; buf_len++;
	} else {
		sbuf_printf(sb, "|-");
		buf[buf_len] = '|'; buf_len++;
		buf[buf_len] = ' '; buf_len++;
	}

	bsr_member = BSRCPUMASK(node->members);

	if (node->type == PACKAGE_LEVEL) {
		sbuf_printf(sb,"PACKAGE MEMBERS: ");
	} else if (node->type == CHIP_LEVEL) {
		sbuf_printf(sb,"CHIP ID %d: ",
			get_chip_ID(bsr_member));
	} else if (node->type == CORE_LEVEL) {
		if (node->compute_unit_id != (uint8_t)-1) {
			sbuf_printf(sb,"Compute Unit ID %d: ",
				node->compute_unit_id);
		} else {
			sbuf_printf(sb,"CORE ID %d: ",
				get_core_number_within_chip(bsr_member));
		}
	} else if (node->type == THREAD_LEVEL) {
		if (node->compute_unit_id != (uint8_t)-1) {
			sbuf_printf(sb,"CORE ID %d: ",
				get_core_number_within_chip(bsr_member));
		} else {
			sbuf_printf(sb,"THREAD ID %d: ",
				get_logical_CPU_number_within_core(bsr_member));
		}
	} else {
		sbuf_printf(sb,"UNKNOWN: ");
	}
	sbuf_print_cpuset(sb, &node->members);
	sbuf_printf(sb,"\n");

	for (i = 0; i < node->child_no; i++) {
		print_cpu_topology_tree_sysctl_helper(node->child_node[i],
		    sb, buf, buf_len, i == (node->child_no -1));
	}
}

/* SYSCTL PROCEDURE for printing the CPU Topology tree */
static int
print_cpu_topology_tree_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int ret;
	char buf[INDENT_BUF_SIZE];

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	sb = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
	if (sb == NULL) {
		return (ENOMEM);
	}
	sbuf_printf(sb,"\n");
	print_cpu_topology_tree_sysctl_helper(cpu_root_node, sb, buf, 0, 1);

	sbuf_finish(sb);

	ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));

	sbuf_delete(sb);

	return ret;
}

/* SYSCTL PROCEDURE for printing the CPU Topology level description */
static int
print_cpu_topology_level_description_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int ret;

	sb = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	if (cpu_topology_levels_number == 4) /* HT available */
		sbuf_printf(sb, "0 - thread; 1 - core; 2 - socket; 3 - anything");
	else if (cpu_topology_levels_number == 3) /* No HT available */
		sbuf_printf(sb, "0 - core; 1 - socket; 2 - anything");
	else if (cpu_topology_levels_number == 2) /* No HT and no Multi-Core */
		sbuf_printf(sb, "0 - socket; 1 - anything");
	else
		sbuf_printf(sb, "Unknown");

	sbuf_finish(sb);

	ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));

	sbuf_delete(sb);

	return ret;
}

/* Find the cpu_node_t whose members exactly match the given mask */
static cpu_node_t *
get_cpu_node_by_cpumask(cpu_node_t * node,
			cpumask_t mask) {

	cpu_node_t * found = NULL;
	int i;

	if (CPUMASK_CMPMASKEQ(node->members, mask))
		return node;

	for (i = 0; i < node->child_no; i++) {
		found = get_cpu_node_by_cpumask(node->child_node[i], mask);
		if (found != NULL) {
			return found;
		}
	}
	return NULL;
}

cpu_node_t *
get_cpu_node_by_cpuid(int cpuid) {
	cpumask_t mask;

	CPUMASK_ASSBIT(mask, cpuid);

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	return get_cpu_node_by_cpumask(cpu_root_node, mask);
}

/* Get the mask of cpuid's siblings at the given level_type */
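/*
 * E.g. get_cpumask_from_level(i, CORE_LEVEL) returns the mask of all
 * hardware threads sharing a core with cpu i (including i itself); an
 * empty mask is returned when no node of the requested type exists.
 */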
cpumask_t
get_cpumask_from_level(int cpuid,
			uint8_t level_type)
{
	cpu_node_t * node;
	cpumask_t mask;

	CPUMASK_ASSBIT(mask, cpuid);

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	node = get_cpu_node_by_cpumask(cpu_root_node, mask);

	if (node == NULL) {
		CPUMASK_ASSZERO(mask);
		return mask;
	}

	while (node != NULL) {
		if (node->type == level_type) {
			return node->members;
		}
		node = node->parent_node;
	}
	CPUMASK_ASSZERO(mask);

	return mask;
}

static const cpu_node_t *
get_cpu_node_by_chipid2(const cpu_node_t *node, int chip_id)
{
	int cpuid;

	if (node->type != CHIP_LEVEL) {
		const cpu_node_t *ret = NULL;
		int i;

		for (i = 0; i < node->child_no; ++i) {
			ret = get_cpu_node_by_chipid2(node->child_node[i],
			    chip_id);
			if (ret != NULL)
				break;
		}
		return ret;
	}

	cpuid = BSRCPUMASK(node->members);
	if (get_chip_ID(cpuid) == chip_id)
		return node;
	return NULL;
}

const cpu_node_t *
get_cpu_node_by_chipid(int chip_id)
{
	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));
	return get_cpu_node_by_chipid2(cpu_root_node, chip_id);
}

/* Initialize the pcpu_sysctl per-cpu info */
static void
init_pcpu_topology_sysctl(void)
{
	struct sbuf sb;
	cpumask_t mask;
	int min_id = -1;
	int max_id = -1;
	int i;
	int phys_id;

	pcpu_sysctl = kmalloc(sizeof(*pcpu_sysctl) * MAXCPU, M_PCPUSYS,
			      M_INTWAIT | M_ZERO);

	for (i = 0; i < ncpus; i++) {
		sbuf_new(&sb, pcpu_sysctl[i].cpu_name,
		    sizeof(pcpu_sysctl[i].cpu_name), SBUF_FIXEDLEN);
		sbuf_printf(&sb,"cpu%d", i);
		sbuf_finish(&sb);


		/* Get physical siblings */
		mask = get_cpumask_from_level(i, CHIP_LEVEL);
		if (CPUMASK_TESTZERO(mask)) {
			pcpu_sysctl[i].physical_id = INVALID_ID;
			continue;
		}

		sbuf_new(&sb, pcpu_sysctl[i].physical_siblings,
		    sizeof(pcpu_sysctl[i].physical_siblings), SBUF_FIXEDLEN);
		sbuf_print_cpuset(&sb, &mask);
		sbuf_trim(&sb);
		sbuf_finish(&sb);

		phys_id = get_chip_ID(i);
		pcpu_sysctl[i].physical_id = phys_id;
		if (min_id < 0 || min_id > phys_id)
			min_id = phys_id;
		if (max_id < 0 || max_id < phys_id)
			max_id = phys_id;

		/* Get core siblings */
		mask = get_cpumask_from_level(i, CORE_LEVEL);
		if (CPUMASK_TESTZERO(mask)) {
			pcpu_sysctl[i].core_id = INVALID_ID;
			continue;
		}

		sbuf_new(&sb, pcpu_sysctl[i].core_siblings,
		    sizeof(pcpu_sysctl[i].core_siblings), SBUF_FIXEDLEN);
		sbuf_print_cpuset(&sb, &mask);
		sbuf_trim(&sb);
		sbuf_finish(&sb);

		pcpu_sysctl[i].core_id = get_core_number_within_chip(i);
		if (cpu_topology_core_ids < pcpu_sysctl[i].core_id)
			cpu_topology_core_ids = pcpu_sysctl[i].core_id + 1;

	}

	/*
	 * Normalize physical ids so they can be used by the VM system.
	 * Some systems number starting at 0, others starting at 1.
	 */
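	/*
	 * For example, two packages reporting chip IDs 1 and 2 give
	 * cpu_topology_phys_ids = 2, and the modulo below maps every
	 * physical_id into the range [0, cpu_topology_phys_ids).
	 */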
	cpu_topology_phys_ids = max_id - min_id + 1;
	if (cpu_topology_phys_ids <= 0)		/* don't crash */
		cpu_topology_phys_ids = 1;
	for (i = 0; i < ncpus; i++) {
		pcpu_sysctl[i].physical_id %= cpu_topology_phys_ids;
	}
}

/* Build SYSCTL structure for revealing
 * the CPU Topology to user-space.
 */
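/*
 * The resulting layout is roughly:
 *
 *	hw.cpu_topology.tree
 *	hw.cpu_topology.level_description
 *	hw.cpu_topology.members
 *	hw.cpu_topology.cpuN.physical_id
 *	hw.cpu_topology.cpuN.physical_siblings
 *	hw.cpu_topology.cpuN.core_id
 *	hw.cpu_topology.cpuN.core_siblings
 */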
static void
build_sysctl_cpu_topology(void)
{
	int i;
	struct sbuf sb;

	/* SYSCTL new leaf for "cpu_topology" */
	sysctl_ctx_init(&cpu_topology_sysctl_ctx);
	cpu_topology_sysctl_tree = SYSCTL_ADD_NODE(&cpu_topology_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO,
	    "cpu_topology",
	    CTLFLAG_RD, 0, "");

	/* SYSCTL cpu_topology "tree" entry */
	SYSCTL_ADD_PROC(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "tree", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, print_cpu_topology_tree_sysctl, "A",
	    "Tree print of CPU topology");

	/* SYSCTL cpu_topology "level_description" entry */
	SYSCTL_ADD_PROC(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "level_description", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, print_cpu_topology_level_description_sysctl, "A",
	    "Level description of CPU topology");

	/* SYSCTL cpu_topology "members" entry */
	sbuf_new(&sb, cpu_topology_members,
	    sizeof(cpu_topology_members), SBUF_FIXEDLEN);
	sbuf_print_cpuset(&sb, &cpu_root_node->members);
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	SYSCTL_ADD_STRING(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "members", CTLFLAG_RD,
	    cpu_topology_members, 0,
	    "Members of the CPU Topology");

	/* SYSCTL per_cpu info */
	for (i = 0; i < ncpus; i++) {
		/* New leaf : hw.cpu_topology.cpux */
		sysctl_ctx_init(&pcpu_sysctl[i].sysctl_ctx);
		pcpu_sysctl[i].sysctl_tree = SYSCTL_ADD_NODE(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
		    OID_AUTO,
		    pcpu_sysctl[i].cpu_name,
		    CTLFLAG_RD, 0, "");

		/* Check if the physical_id found is valid */
		if (pcpu_sysctl[i].physical_id == INVALID_ID) {
			continue;
		}

		/* Add physical id info */
		SYSCTL_ADD_INT(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "physical_id", CTLFLAG_RD,
		    &pcpu_sysctl[i].physical_id, 0,
		    "Physical ID");

		/* Add physical siblings */
		SYSCTL_ADD_STRING(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "physical_siblings", CTLFLAG_RD,
		    pcpu_sysctl[i].physical_siblings, 0,
		    "Physical siblings");

		/* Check if the core_id found is valid */
		if (pcpu_sysctl[i].core_id == INVALID_ID) {
			continue;
		}

		/* Add core id info */
		SYSCTL_ADD_INT(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "core_id", CTLFLAG_RD,
		    &pcpu_sysctl[i].core_id, 0,
		    "Core ID");

		/* Add core siblings */
		SYSCTL_ADD_STRING(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "core_siblings", CTLFLAG_RD,
		    pcpu_sysctl[i].core_siblings, 0,
		    "Core siblings");
	}
}

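/*
 * Render a cpu mask into sb as a parenthesized list of ranges; e.g. a mask
 * with bits 0-3 and 8 set is printed as "cpus(0-3, 8) ".
 */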
static
void
sbuf_print_cpuset(struct sbuf *sb, cpumask_t *mask)
{
	int i;
	int b = -1;
	int e = -1;
	int more = 0;

	sbuf_printf(sb, "cpus(");
	CPUSET_FOREACH(i, *mask) {
		if (b < 0) {
			b = i;
			e = b + 1;
			continue;
		}
		if (e == i) {
			++e;
			continue;
		}
		if (more)
			sbuf_printf(sb, ", ");
		if (b == e - 1) {
			sbuf_printf(sb, "%d", b);
		} else {
			sbuf_printf(sb, "%d-%d", b, e - 1);
		}
		more = 1;
		b = i;
		e = b + 1;
	}
	if (more)
		sbuf_printf(sb, ", ");
	if (b >= 0) {
		if (b == e - 1) {
			sbuf_printf(sb, "%d", b);
		} else {
			sbuf_printf(sb, "%d-%d", b, e - 1);
		}
	}
	sbuf_printf(sb, ") ");
}

int
get_cpu_core_id(int cpuid)
{
	if (pcpu_sysctl)
		return(pcpu_sysctl[cpuid].core_id);
	return(0);
}

int
get_cpu_phys_id(int cpuid)
{
	if (pcpu_sysctl)
		return(pcpu_sysctl[cpuid].physical_id);
	return(0);
}

/* Build the CPU Topology and SYSCTL Topology tree */
static void
init_cpu_topology(void)
{
	build_cpu_topology();

	init_pcpu_topology_sysctl();
	build_sysctl_cpu_topology();
}
SYSINIT(cpu_topology, SI_BOOT2_CPU_TOPOLOGY, SI_ORDER_FIRST,
    init_cpu_topology, NULL);
773