xref: /dragonfly/sys/kern/subr_cpu_topology.c (revision f503b4c4)
/*
 * Copyright (c) 2012 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/sbuf.h>
#include <sys/cpu_topology.h>

#include <machine/smp.h>

#ifndef NAPICID
#define NAPICID 256
#endif

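/*
 * Each recursion level of the tree printer appends two characters to its
 * indentation buffer, so LEVEL_NO*3 leaves headroom for the deepest tree.
 */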
#define INDENT_BUF_SIZE LEVEL_NO*3
#define INVALID_ID -1

/* Per-cpu sysctl nodes and info */
struct per_cpu_sysctl_info {
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	char cpu_name[32];
	int physical_id;
	int core_id;
	char physical_siblings[8*MAXCPU];
	char core_siblings[8*MAXCPU];
};
typedef struct per_cpu_sysctl_info per_cpu_sysctl_info_t;

static cpu_node_t cpu_topology_nodes[MAXCPU];	/* Memory for topology */
static cpu_node_t *cpu_root_node;		/* Root node pointer */

static struct sysctl_ctx_list cpu_topology_sysctl_ctx;
static struct sysctl_oid *cpu_topology_sysctl_tree;
static char cpu_topology_members[8*MAXCPU];
static per_cpu_sysctl_info_t pcpu_sysctl[MAXCPU];
static void sbuf_print_cpuset(struct sbuf *sb, cpumask_t *mask);

int cpu_topology_levels_number = 1;
cpu_node_t *root_cpu_node;

/* Get the next valid apicid starting
 * from the current apicid (curr_apicid).
 */
static int
get_next_valid_apicid(int curr_apicid)
{
	int next_apicid = curr_apicid;
	do {
		next_apicid++;
	}
	while(get_cpuid_from_apicid(next_apicid) == -1 &&
	   next_apicid < NAPICID);
	if (next_apicid == NAPICID) {
		kprintf("Warning: No next valid APICID found. Returning -1\n");
		return -1;
	}
	return next_apicid;
}

/* Build a generic topology tree. The parameters have the following meaning:
 * - children_no_per_level : the number of children on each level
 * - level_types : the type of each level (THREAD, CORE, CHIP, etc)
 * - cur_level : the current level of the tree
 * - node : the current node
 * - last_free_node : the last free node in the global array
 * - apicid : the APIC ID cursor; basically, the leaves get their IDs from it
 */
static void
build_topology_tree(int *children_no_per_level,
    uint8_t *level_types,
    int cur_level,
    cpu_node_t *node,
    cpu_node_t **last_free_node,
    int *apicid)
{
	int i;

	node->child_no = children_no_per_level[cur_level];
	node->type = level_types[cur_level];
	CPUMASK_ASSZERO(node->members);
	node->compute_unit_id = -1;

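	/*
	 * Leaf level: consume the next valid APIC ID and record the
	 * corresponding cpuid as the node's only member.
	 */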
	if (node->child_no == 0) {
		*apicid = get_next_valid_apicid(*apicid);
		CPUMASK_ASSBIT(node->members, get_cpuid_from_apicid(*apicid));
		return;
	}

	if (node->parent_node == NULL)
		root_cpu_node = node;

	for (i = 0; i < node->child_no; i++) {
		node->child_node[i] = *last_free_node;
		(*last_free_node)++;

		node->child_node[i]->parent_node = node;

		build_topology_tree(children_no_per_level,
		    level_types,
		    cur_level + 1,
		    node->child_node[i],
		    last_free_node,
		    apicid);

		CPUMASK_ORMASK(node->members, node->child_node[i]->members);
	}
}

#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
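/*
 * Remove the element at index "pos" from the first "n" entries of the
 * pointer array "a", shifting the remaining entries left and clearing
 * the vacated slot.  Used below when re-parenting AMD compute-unit
 * siblings.
 */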
static void
migrate_elements(cpu_node_t **a, int n, int pos)
{
	int i;

	for (i = pos; i < n - 1 ; i++) {
		a[i] = a[i+1];
	}
	a[i] = NULL;
}
#endif

/* Build the CPU topology. The detection is made by comparing the
 * chip, core and logical IDs of each CPU with the IDs of the
 * BSP. When a match is found, the CPUs are siblings at that level.
 */
static void
build_cpu_topology(void)
{
	int i;
	int BSPID = 0;
	int threads_per_core = 0;
	int cores_per_chip = 0;
	int chips_per_package = 0;
	int children_no_per_level[LEVEL_NO];
	uint8_t level_types[LEVEL_NO];
	int apicid = -1;

	cpu_node_t *root = &cpu_topology_nodes[0];
	cpu_node_t *last_free_node = root + 1;

	detect_cpu_topology();

	/* Assume that the topology is uniform.
	 * Find the number of siblings within the chip
	 * and within the core to build up the topology.
	 */
	for (i = 0; i < ncpus; i++) {
		cpumask_t mask;

		CPUMASK_ASSBIT(mask, i);

		if (CPUMASK_TESTMASK(mask, smp_active_mask) == 0)
			continue;

		if (get_chip_ID(BSPID) == get_chip_ID(i))
			cores_per_chip++;
		else
			continue;

		if (get_core_number_within_chip(BSPID) ==
		    get_core_number_within_chip(i))
			threads_per_core++;
	}

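	/*
	 * The loop above counted every hardware thread on the BSP's chip
	 * into cores_per_chip, so normalize it by the SMT width before
	 * deriving the number of chips per package.
	 */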
	cores_per_chip /= threads_per_core;
	chips_per_package = ncpus / (cores_per_chip * threads_per_core);

	if (bootverbose)
		kprintf("CPU Topology: cores_per_chip: %d; threads_per_core: %d; chips_per_package: %d;\n",
		    cores_per_chip, threads_per_core, chips_per_package);

	if (threads_per_core > 1) { /* HT available - 4 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = cores_per_chip;
		children_no_per_level[2] = threads_per_core;
		children_no_per_level[3] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;
		level_types[2] = CORE_LEVEL;
		level_types[3] = THREAD_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 4;

	} else if (cores_per_chip > 1) { /* No HT available - 3 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = cores_per_chip;
		children_no_per_level[2] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;
		level_types[2] = CORE_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 3;

	} else { /* No HT and no Multi-Core - 2 levels */

		children_no_per_level[0] = chips_per_package;
		children_no_per_level[1] = 0;

		level_types[0] = PACKAGE_LEVEL;
		level_types[1] = CHIP_LEVEL;

		build_topology_tree(children_no_per_level,
		    level_types,
		    0,
		    root,
		    &last_free_node,
		    &apicid);

		cpu_topology_levels_number = 2;

	}

	cpu_root_node = root;


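	/*
	 * AMD fix-up: when fix_amd_topology() reports compute units, cores
	 * that share a compute unit are regrouped under a new CORE_LEVEL
	 * node and the original leaves are demoted to THREAD_LEVEL, so
	 * they are treated as siblings.
	 */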
#if defined(__x86_64__) && !defined(_KERNEL_VIRTUAL)
	if (fix_amd_topology() == 0) {
		int visited[MAXCPU], i, j, pos, cpuid;
		cpu_node_t *leaf, *parent;

		bzero(visited, MAXCPU * sizeof(int));

		for (i = 0; i < ncpus; i++) {
			if (visited[i] == 0) {
				pos = 0;
				visited[i] = 1;
				leaf = get_cpu_node_by_cpuid(i);

				if (leaf->type == CORE_LEVEL) {
					parent = leaf->parent_node;

					last_free_node->child_node[0] = leaf;
					last_free_node->child_no = 1;
					last_free_node->members = leaf->members;
					last_free_node->compute_unit_id = leaf->compute_unit_id;
					last_free_node->parent_node = parent;
					last_free_node->type = CORE_LEVEL;


					for (j = 0; j < parent->child_no; j++) {
						if (parent->child_node[j] != leaf) {

							cpuid = BSFCPUMASK(parent->child_node[j]->members);
							if (visited[cpuid] == 0 &&
							    parent->child_node[j]->compute_unit_id == leaf->compute_unit_id) {

								last_free_node->child_node[last_free_node->child_no] = parent->child_node[j];
								last_free_node->child_no++;
								CPUMASK_ORMASK(last_free_node->members, parent->child_node[j]->members);

								parent->child_node[j]->type = THREAD_LEVEL;
								parent->child_node[j]->parent_node = last_free_node;
								visited[cpuid] = 1;

								migrate_elements(parent->child_node, parent->child_no, j);
								parent->child_no--;
								j--;
							}
						} else {
							pos = j;
						}
					}
					if (last_free_node->child_no > 1) {
						parent->child_node[pos] = last_free_node;
						leaf->type = THREAD_LEVEL;
						leaf->parent_node = last_free_node;
						last_free_node++;
					}
				}
			}
		}
	}
#endif
}

/* Recursive helper function to print the CPU topology tree */
static void
print_cpu_topology_tree_sysctl_helper(cpu_node_t *node,
    struct sbuf *sb,
    char * buf,
    int buf_len,
    int last)
{
	int i;
	int bsr_member;

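	/*
	 * Emit the indentation accumulated so far, then extend it for this
	 * node's children: a "| " column while siblings follow, blanks
	 * after the last child.
	 */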
	sbuf_bcat(sb, buf, buf_len);
	if (last) {
		sbuf_printf(sb, "\\-");
		buf[buf_len] = ' ';buf_len++;
		buf[buf_len] = ' ';buf_len++;
	} else {
		sbuf_printf(sb, "|-");
		buf[buf_len] = '|';buf_len++;
		buf[buf_len] = ' ';buf_len++;
	}

	bsr_member = BSRCPUMASK(node->members);

	if (node->type == PACKAGE_LEVEL) {
		sbuf_printf(sb,"PACKAGE MEMBERS: ");
	} else if (node->type == CHIP_LEVEL) {
		sbuf_printf(sb,"CHIP ID %d: ",
			get_chip_ID(bsr_member));
	} else if (node->type == CORE_LEVEL) {
		if (node->compute_unit_id != (uint8_t)-1) {
			sbuf_printf(sb,"Compute Unit ID %d: ",
				node->compute_unit_id);
		} else {
			sbuf_printf(sb,"CORE ID %d: ",
				get_core_number_within_chip(bsr_member));
		}
	} else if (node->type == THREAD_LEVEL) {
		if (node->compute_unit_id != (uint8_t)-1) {
			sbuf_printf(sb,"CORE ID %d: ",
				get_core_number_within_chip(bsr_member));
		} else {
			sbuf_printf(sb,"THREAD ID %d: ",
				get_logical_CPU_number_within_core(bsr_member));
		}
	} else {
		sbuf_printf(sb,"UNKNOWN: ");
	}
	sbuf_print_cpuset(sb, &node->members);
	sbuf_printf(sb,"\n");

	for (i = 0; i < node->child_no; i++) {
		print_cpu_topology_tree_sysctl_helper(node->child_node[i],
		    sb, buf, buf_len, i == (node->child_no -1));
	}
}

/* SYSCTL PROCEDURE for printing the CPU Topology tree */
static int
print_cpu_topology_tree_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int ret;
	char buf[INDENT_BUF_SIZE];

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	sb = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
	if (sb == NULL) {
		return (ENOMEM);
	}
	sbuf_printf(sb,"\n");
	print_cpu_topology_tree_sysctl_helper(cpu_root_node, sb, buf, 0, 1);

	sbuf_finish(sb);

	ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));

	sbuf_delete(sb);

	return ret;
}

/* SYSCTL PROCEDURE for printing the CPU Topology level description */
static int
print_cpu_topology_level_description_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct sbuf *sb;
	int ret;

	sb = sbuf_new(NULL, NULL, 500, SBUF_AUTOEXTEND);
	if (sb == NULL)
		return (ENOMEM);

	if (cpu_topology_levels_number == 4) /* HT available */
		sbuf_printf(sb, "0 - thread; 1 - core; 2 - socket; 3 - anything");
	else if (cpu_topology_levels_number == 3) /* No HT available */
		sbuf_printf(sb, "0 - core; 1 - socket; 2 - anything");
	else if (cpu_topology_levels_number == 2) /* No HT and no Multi-Core */
		sbuf_printf(sb, "0 - socket; 1 - anything");
	else
		sbuf_printf(sb, "Unknown");

	sbuf_finish(sb);

	ret = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb));

	sbuf_delete(sb);

	return ret;
}

/* Find a cpu_node_t by a mask */
static cpu_node_t *
get_cpu_node_by_cpumask(cpu_node_t * node,
			cpumask_t mask) {

	cpu_node_t * found = NULL;
	int i;

	if (CPUMASK_CMPMASKEQ(node->members, mask))
		return node;

	for (i = 0; i < node->child_no; i++) {
		found = get_cpu_node_by_cpumask(node->child_node[i], mask);
		if (found != NULL) {
			return found;
		}
	}
	return NULL;
}

cpu_node_t *
get_cpu_node_by_cpuid(int cpuid) {
	cpumask_t mask;

	CPUMASK_ASSBIT(mask, cpuid);

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	return get_cpu_node_by_cpumask(cpu_root_node, mask);
}

/* Get the mask of siblings for level_type of a cpuid */
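/* For example, get_cpumask_from_level(0, CORE_LEVEL) returns cpu0 together
 * with any siblings sharing cpu0's core (hypothetical cpu numbering).
 */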
cpumask_t
get_cpumask_from_level(int cpuid,
			uint8_t level_type)
{
	cpu_node_t * node;
	cpumask_t mask;

	CPUMASK_ASSBIT(mask, cpuid);

	KASSERT(cpu_root_node != NULL, ("cpu_root_node isn't initialized"));

	node = get_cpu_node_by_cpumask(cpu_root_node, mask);

	if (node == NULL) {
		CPUMASK_ASSZERO(mask);
		return mask;
	}

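	/* Walk up from the leaf toward the root until a node of the
	 * requested level is found; its member mask is the sibling set.
	 */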
	while (node != NULL) {
		if (node->type == level_type) {
			return node->members;
		}
		node = node->parent_node;
	}
	CPUMASK_ASSZERO(mask);

	return mask;
}

/* init pcpu_sysctl structure info */
static void
init_pcpu_topology_sysctl(void)
{
	int i;
	cpumask_t mask;
	struct sbuf sb;

	for (i = 0; i < ncpus; i++) {

		sbuf_new(&sb, pcpu_sysctl[i].cpu_name,
		    sizeof(pcpu_sysctl[i].cpu_name), SBUF_FIXEDLEN);
		sbuf_printf(&sb,"cpu%d", i);
		sbuf_finish(&sb);


		/* Get physical siblings */
		mask = get_cpumask_from_level(i, CHIP_LEVEL);
		if (CPUMASK_TESTZERO(mask)) {
			pcpu_sysctl[i].physical_id = INVALID_ID;
			continue;
		}

		sbuf_new(&sb, pcpu_sysctl[i].physical_siblings,
		    sizeof(pcpu_sysctl[i].physical_siblings), SBUF_FIXEDLEN);
		sbuf_print_cpuset(&sb, &mask);
		sbuf_trim(&sb);
		sbuf_finish(&sb);

		pcpu_sysctl[i].physical_id = get_chip_ID(i);

		/* Get core siblings */
		mask = get_cpumask_from_level(i, CORE_LEVEL);
		if (CPUMASK_TESTZERO(mask)) {
			pcpu_sysctl[i].core_id = INVALID_ID;
			continue;
		}

		sbuf_new(&sb, pcpu_sysctl[i].core_siblings,
		    sizeof(pcpu_sysctl[i].core_siblings), SBUF_FIXEDLEN);
		sbuf_print_cpuset(&sb, &mask);
		sbuf_trim(&sb);
		sbuf_finish(&sb);

		pcpu_sysctl[i].core_id = get_core_number_within_chip(i);

	}
}

/* Build SYSCTL structure for revealing
 * the CPU Topology to user-space.
 */
static void
build_sysctl_cpu_topology(void)
{
	int i;
	struct sbuf sb;

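	/*
	 * The nodes added below show up in user-space as hw.cpu_topology.tree,
	 * hw.cpu_topology.level_description, hw.cpu_topology.members and the
	 * per-cpu hw.cpu_topology.cpuN.* entries.
	 */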
	/* SYSCTL new leaf for "cpu_topology" */
	sysctl_ctx_init(&cpu_topology_sysctl_ctx);
	cpu_topology_sysctl_tree = SYSCTL_ADD_NODE(&cpu_topology_sysctl_ctx,
	    SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO,
	    "cpu_topology",
	    CTLFLAG_RD, 0, "");

	/* SYSCTL cpu_topology "tree" entry */
	SYSCTL_ADD_PROC(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "tree", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, print_cpu_topology_tree_sysctl, "A",
	    "Tree print of CPU topology");

	/* SYSCTL cpu_topology "level_description" entry */
	SYSCTL_ADD_PROC(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "level_description", CTLTYPE_STRING | CTLFLAG_RD,
	    NULL, 0, print_cpu_topology_level_description_sysctl, "A",
	    "Level description of CPU topology");

	/* SYSCTL cpu_topology "members" entry */
	sbuf_new(&sb, cpu_topology_members,
	    sizeof(cpu_topology_members), SBUF_FIXEDLEN);
	sbuf_print_cpuset(&sb, &cpu_root_node->members);
	sbuf_trim(&sb);
	sbuf_finish(&sb);
	SYSCTL_ADD_STRING(&cpu_topology_sysctl_ctx,
	    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
	    OID_AUTO, "members", CTLFLAG_RD,
	    cpu_topology_members, 0,
	    "Members of the CPU Topology");

	/* SYSCTL per_cpu info */
	for (i = 0; i < ncpus; i++) {
		/* New leaf : hw.cpu_topology.cpux */
		sysctl_ctx_init(&pcpu_sysctl[i].sysctl_ctx);
		pcpu_sysctl[i].sysctl_tree = SYSCTL_ADD_NODE(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(cpu_topology_sysctl_tree),
		    OID_AUTO,
		    pcpu_sysctl[i].cpu_name,
		    CTLFLAG_RD, 0, "");

		/* Check if the physical_id found is valid */
		if (pcpu_sysctl[i].physical_id == INVALID_ID) {
			continue;
		}

		/* Add physical id info */
		SYSCTL_ADD_INT(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "physical_id", CTLFLAG_RD,
		    &pcpu_sysctl[i].physical_id, 0,
		    "Physical ID");

		/* Add physical siblings */
		SYSCTL_ADD_STRING(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "physical_siblings", CTLFLAG_RD,
		    pcpu_sysctl[i].physical_siblings, 0,
		    "Physical siblings");

		/* Check if the core_id found is valid */
		if (pcpu_sysctl[i].core_id == INVALID_ID) {
			continue;
		}

		/* Add core id info */
		SYSCTL_ADD_INT(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "core_id", CTLFLAG_RD,
		    &pcpu_sysctl[i].core_id, 0,
		    "Core ID");

		/* Add core siblings */
		SYSCTL_ADD_STRING(&pcpu_sysctl[i].sysctl_ctx,
		    SYSCTL_CHILDREN(pcpu_sysctl[i].sysctl_tree),
		    OID_AUTO, "core_siblings", CTLFLAG_RD,
		    pcpu_sysctl[i].core_siblings, 0,
		    "Core siblings");
	}
}

static
void
sbuf_print_cpuset(struct sbuf *sb, cpumask_t *mask)
{
	int i;
	int b = -1;
	int e = -1;
	int more = 0;

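	/*
	 * Render the mask as "cpus(a-b, c, ...)", coalescing runs of
	 * consecutive cpu ids into ranges.
	 */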
	sbuf_printf(sb, "cpus(");
	CPUSET_FOREACH(i, *mask) {
		if (b < 0) {
			b = i;
			e = b + 1;
			continue;
		}
		if (e == i) {
			++e;
			continue;
		}
		if (more)
			sbuf_printf(sb, ", ");
		if (b == e - 1) {
			sbuf_printf(sb, "%d", b);
		} else {
			sbuf_printf(sb, "%d-%d", b, e - 1);
		}
		more = 1;
		b = i;
		e = b + 1;
	}
	if (more)
		sbuf_printf(sb, ", ");
	/* Flush the final run, if any */
	if (b >= 0) {
		if (b == e - 1) {
			sbuf_printf(sb, "%d", b);
		} else {
			sbuf_printf(sb, "%d-%d", b, e - 1);
		}
	}
	sbuf_printf(sb, ") ");
}

/* Build the CPU Topology and SYSCTL Topology tree */
static void
init_cpu_topology(void)
{
	build_cpu_topology();

	init_pcpu_topology_sysctl();
	build_sysctl_cpu_topology();
}
SYSINIT(cpu_topology, SI_BOOT2_CPU_TOPOLOGY, SI_ORDER_FIRST,
    init_cpu_topology, NULL)
694