1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * isst_tpmi.c: SST TPMI interface core
4  *
5  * Copyright (c) 2023, Intel Corporation.
6  * All Rights Reserved.
7  *
8  * The following information is useful for understanding the flows:
9  * In the current generation of platforms, TPMI is supported via an OOB
10  * PCI device. This PCI device has one instance per CPU package.
11  * There is a unique TPMI ID for SST. Each TPMI ID also has multiple
12  * entries, representing per power domain information.
13  *
14  * There is one dev file for complete SST information and control, the same as
15  * for the prior generation of hardware. User space doesn't need to know how the
16  * information is presented by the hardware. The TPMI core module implements
17  * the hardware mapping.
18  */
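
/*
 * Editor's note: a minimal user-space sketch of how this single dev file is
 * typically used, assuming the character device registered by the ISST core
 * is /dev/isst_interface and using only the ioctl and structure already
 * referenced in this file. Illustrative only, error handling omitted:
 *
 *	struct isst_tpmi_instance_count count = { .socket_id = 0 };
 *	int fd = open("/dev/isst_interface", O_RDWR);
 *
 *	if (!ioctl(fd, ISST_IF_COUNT_TPMI_INSTANCES, &count))
 *		printf("power domains: %d valid_mask: %lx\n",
 *		       count.count, (unsigned long)count.valid_mask);
 *	close(fd);
 */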
19 
20 #include <linux/auxiliary_bus.h>
21 #include <linux/delay.h>
22 #include <linux/intel_tpmi.h>
23 #include <linux/fs.h>
24 #include <linux/io.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <uapi/linux/isst_if.h>
28 
29 #include "isst_tpmi_core.h"
30 #include "isst_if_common.h"
31 
32 /* Supported SST hardware version by this driver */
33 #define ISST_MAJOR_VERSION	0
34 #define ISST_MINOR_VERSION	1
35 
36 /*
37  * Multiplication factor used when a value read from MMIO does not need any
38  * scaling to get to a standard unit.
39  */
40 #define SST_MUL_FACTOR_NONE    1
41 
42 /* Scaling factor of 100 for frequency-ratio to frequency (MHz) conversion */
43 #define SST_MUL_FACTOR_FREQ    100
44 
45 /* All SST registers are 64 bits in size */
46 #define SST_REG_SIZE   8
47 
48 /**
49  * struct sst_header -	SST main header
50  * @interface_version:	Version number for this interface
51  * @cap_mask:		Bitmask of the supported sub features. 1=the sub feature is enabled.
52  *			0=disabled.
53  *			Bit[8]= SST_CP enable (1), disable (0)
54  *			bit[9]= SST_PP enable (1), disable (0)
55  *			other bits are reserved for future use
56  * @cp_offset:		Qword (8 bytes) offset to the SST_CP register bank
57  * @pp_offset:		Qword (8 bytes) offset to the SST_PP register bank
58  * @reserved:		Reserved for future use
59  *
60  * This register allows SW to discover SST capability and the offsets to SST-CP
61  * and SST-PP register banks.
62  */
63 struct sst_header {
64 	u8 interface_version;
65 	u8 cap_mask;
66 	u8 cp_offset;
67 	u8 pp_offset;
68 	u32 reserved;
69 } __packed;
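
/*
 * Editor's note: a worked (hypothetical) example of decoding this header.
 * If readq() of the first SST qword returned 0x0000000005020301, then
 * interface_version = 0x01, cap_mask = 0x03 (SST-CP and SST-PP supported),
 * cp_offset = 0x02 qwords and pp_offset = 0x05 qwords. sst_main() below
 * converts the qword offsets to byte offsets by multiplying by 8, so the
 * SST-CP bank would start at sst_base + 16 and the SST-PP bank at
 * sst_base + 40.
 */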
70 
71 /**
72  * struct cp_header -	SST-CP (core-power) header
73  * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
74  * @feature_rev:	Interface Version number for this SST feature
75  * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
76  * @reserved:		Reserved for future use
77  *
78  * This structure is used to store the SST-CP header. This is packed to the same
79  * format as defined in the specifications.
80  */
81 struct cp_header {
82 	u64 feature_id :4;
83 	u64 feature_rev :8;
84 	u64 ratio_unit :2;
85 	u64 reserved :50;
86 } __packed;
87 
88 /**
89  * struct pp_header -	SST-PP (Perf profile) header
90  * @feature_id:		0=SST-CP, 1=SST-PP, 2=SST-BF, 3=SST-TF
91  * @feature_rev:	Interface Version number for this SST feature
92  * @level_en_mask:	SST-PP level enable/disable fuse mask
93  * @allowed_level_mask:	Allowed level mask used for dynamic config level switching
94  * @reserved0:		Reserved for future use
95  * @ratio_unit:		Frequency ratio unit. 00: 100MHz. All others are reserved
96  * @block_size:		Size of PP block in Qword unit (8 bytes)
97  * @dynamic_switch:	If set (1), dynamic switching of SST PP is supported
98  * @memory_ratio_unit:	Memory Controller frequency ratio unit. 00: 100MHz, others reserved
99  * @reserved1:		Reserved for future use
100  *
101  * This structure is used to store the SST-PP header. This is packed to the same
102  * format as defined in the specifications.
103  */
104 struct pp_header {
105 	u64 feature_id :4;
106 	u64 feature_rev :8;
107 	u64 level_en_mask :8;
108 	u64 allowed_level_mask :8;
109 	u64 reserved0 :4;
110 	u64 ratio_unit :2;
111 	u64 block_size :8;
112 	u64 dynamic_switch :1;
113 	u64 memory_ratio_unit :2;
114 	u64 reserved1 :19;
115 } __packed;
116 
117 /**
118  * struct feature_offset -	Offsets to SST-PP features
119  * @pp_offset:		Qword offset within PP level for the SST_PP register bank
120  * @bf_offset:		Qword offset within PP level for the SST_BF register bank
121  * @tf_offset:		Qword offset within PP level for the SST_TF register bank
122  * @reserved:		Reserved for future use
123  *
124  * This structure is used to store offsets for SST features in the register bank.
125  * This is packed to the same format as defined in the specifications.
126  */
127 struct feature_offset {
128 	u64 pp_offset :8;
129 	u64 bf_offset :8;
130 	u64 tf_offset :8;
131 	u64 reserved :40;
132 } __packed;
133 
134 /**
135  * struct levels_offset -	Offsets to each SST PP level
136  * @sst_pp_level0_offset:	Qword offset to the register block of PP level 0
137  * @sst_pp_level1_offset:	Qword offset to the register block of PP level 1
138  * @sst_pp_level2_offset:	Qword offset to the register block of PP level 2
139  * @sst_pp_level3_offset:	Qword offset to the register block of PP level 3
140  * @sst_pp_level4_offset:	Qword offset to the register block of PP level 4
141  * @reserved:			Reserved for future use
142  *
143  * This structure is used to store offsets of SST PP levels in the register bank.
144  * This is packed to the same format as defined in the specifications.
145  */
146 struct levels_offset {
147 	u64 sst_pp_level0_offset :8;
148 	u64 sst_pp_level1_offset :8;
149 	u64 sst_pp_level2_offset :8;
150 	u64 sst_pp_level3_offset :8;
151 	u64 sst_pp_level4_offset :8;
152 	u64 reserved :24;
153 } __packed;
154 
155 /**
156  * struct pp_control_offset -	Offsets for SST PP controls
157  * @perf_level:		An SST-PP level that SW intends to switch to
158  * @perf_level_lock:	SST-PP level select lock. 0 - unlocked. 1 - locked till next reset
159  * @resvd0:		Reserved for future use
160  * @current_state:	Bit mask to control the enable(1)/disable(0) state of each feature
161  *			of the current PP level, bit 0 = BF, bit 1 = TF, bit 2-7 = reserved
162  * @reserved:		Reserved for future use
163  *
164  * This structure is used to store offsets of SST PP controls in the register bank.
165  * This is packed to the same format as defined in the specifications.
166  */
167 struct pp_control_offset {
168 	u64 perf_level :3;
169 	u64 perf_level_lock :1;
170 	u64 resvd0 :4;
171 	u64 current_state :8;
172 	u64 reserved :48;
173 } __packed;
174 
175 /**
176  * struct pp_status_offset -	Offsets for SST PP status fields
177  * @sst_pp_level:	Returns the current SST-PP level
178  * @sst_pp_lock:	Returns the lock bit setting of perf_level_lock in pp_control_offset
179  * @error_type:		Returns last error of SST-PP level change request. 0: no error,
180  *			1: level change not allowed, others: reserved
181  * @feature_state:	Bit mask to indicate the enable(1)/disable(0) state of each feature of the
182  *			current PP level. bit 0 = BF, bit 1 = TF, bit 2-7 reserved
183  * @reserved0:		Reserved for future use
184  * @feature_error_type: Returns last error of the specific feature. Three error_type bits per
185  *			feature. i.e. ERROR_TYPE[2:0] for BF, ERROR_TYPE[5:3] for TF, etc.
186  *			0x0: no error, 0x1: The specific feature is not supported by the hardware.
187  *			0x2-0x6: Reserved. 0x7: feature state change is not allowed.
188  * @reserved1:		Reserved for future use
189  *
190  * This structure is used to store offsets of SST PP status in the register bank.
191  * This is packed to the same format as defined in the specifications.
192  */
193 struct pp_status_offset {
194 	u64 sst_pp_level :3;
195 	u64 sst_pp_lock :1;
196 	u64 error_type :4;
197 	u64 feature_state :8;
198 	u64 reserved0 :16;
199 	u64 feature_error_type : 24;
200 	u64 reserved1 :8;
201 } __packed;
202 
203 /**
204  * struct perf_level -	Used to store perf level and mmio offset
205  * @mmio_offset:	mmio offset for a perf level
206  * @level:		perf level for this offset
207  *
208  * This structure is used to store the final mmio offset of each perf level from
209  * the SST base mmio offset.
210  */
211 struct perf_level {
212 	int mmio_offset;
213 	int level;
214 };
215 
216 /**
217  * struct tpmi_per_power_domain_info -	Store per power_domain SST info
218  * @package_id:		Package id for this power_domain
219  * @power_domain_id:	Power domain id; each entry of the SST-TPMI instance is a power_domain.
220  * @max_level:		Maximum PP level possible for this power_domain
221  * @ratio_unit:		Ratio unit for converting to MHz
222  * @avx_levels:		Number of AVX levels
223  * @pp_block_size:	Block size from PP header
224  * @sst_header:		Store SST header for this power_domain
225  * @cp_header:		Store SST-CP header for this power_domain
226  * @pp_header:		Store SST-PP header for this power_domain
227  * @perf_levels:	Pointer to each perf level to map level to mmio offset
228  * @feature_offsets:	Store feature offsets for each PP-level
229  * @control_offset:	Store the control offset for each PP-level
230  * @status_offset:	Store the status offset for each PP-level
231  * @sst_base:		Mapped SST base IO memory
232  * @auxdev:		Auxiliary device instance that enumerated this instance
233  * @saved_sst_cp_control: Saved SST-CP control configuration, restored on resume
234  * @saved_clos_configs:	Saved SST-CP CLOS configuration, restored on resume
235  * @saved_clos_assocs:	Saved SST-CP CLOS association, restored on resume
236  * @saved_pp_control:	Saved SST-PP control information, restored on resume
237  * @write_blocked:	Write operation is blocked, so can't change SST state
238  *
239  * This structure is used to store complete SST information for a power_domain. This information
240  * is used to process read/write requests for any SST IOCTL. Each physical CPU package can have multiple
241  * power_domains. Each power domain describes its own SST information and has its own controls.
242  */
243 struct tpmi_per_power_domain_info {
244 	int package_id;
245 	int power_domain_id;
246 	int max_level;
247 	int ratio_unit;
248 	int avx_levels;
249 	int pp_block_size;
250 	struct sst_header sst_header;
251 	struct cp_header cp_header;
252 	struct pp_header pp_header;
253 	struct perf_level *perf_levels;
254 	struct feature_offset feature_offsets;
255 	struct pp_control_offset control_offset;
256 	struct pp_status_offset status_offset;
257 	void __iomem *sst_base;
258 	struct auxiliary_device *auxdev;
259 	u64 saved_sst_cp_control;
260 	u64 saved_clos_configs[4];
261 	u64 saved_clos_assocs[4];
262 	u64 saved_pp_control;
263 	bool write_blocked;
264 };
265 
266 /**
267  * struct tpmi_sst_struct -	Store sst info for a package
268  * @package_id:			Package id for this aux device instance
269  * @number_of_power_domains:	Number of power_domains pointed by power_domain_info pointer
270  * @power_domain_info:		Pointer to power domains information
271  *
272  * This structure is used to store full SST information for a package.
273  * Each package has a unique OOB PCI device, which enumerates TPMI.
274  * Each Package will have multiple power_domains.
275  */
276 struct tpmi_sst_struct {
277 	int package_id;
278 	int number_of_power_domains;
279 	struct tpmi_per_power_domain_info *power_domain_info;
280 };
281 
282 /**
283  * struct tpmi_sst_common_struct -	Store all SST instances
284  * @max_index:		Maximum instances currently present
285  * @sst_inst:		Pointer to per package instance
286  *
287  * Stores every SST Package instance.
288  */
289 struct tpmi_sst_common_struct {
290 	int max_index;
291 	struct tpmi_sst_struct **sst_inst;
292 };
293 
294 /*
295  * Each IOCTL request is processed under this lock. Also used to protect
296  * registration functions and common data structures.
297  */
298 static DEFINE_MUTEX(isst_tpmi_dev_lock);
299 
300 /* Usage count to track the number of TPMI SST instances registered to this core. */
301 static int isst_core_usage_count;
302 
303 /* Stores complete SST information for every package and power_domain */
304 static struct tpmi_sst_common_struct isst_common;
305 
306 #define SST_MAX_AVX_LEVELS	3
307 
308 #define SST_PP_OFFSET_0		8
309 #define SST_PP_OFFSET_1		16
310 #define SST_PP_OFFSET_SIZE	8
311 
312 static int sst_add_perf_profiles(struct auxiliary_device *auxdev,
313 				 struct tpmi_per_power_domain_info *pd_info,
314 				 int levels)
315 {
316 	u64 perf_level_offsets;
317 	int i;
318 
319 	pd_info->perf_levels = devm_kcalloc(&auxdev->dev, levels,
320 					    sizeof(struct perf_level),
321 					    GFP_KERNEL);
322 	if (!pd_info->perf_levels)
323 		return -ENOMEM;
324 
325 	pd_info->ratio_unit = pd_info->pp_header.ratio_unit;
326 	pd_info->avx_levels = SST_MAX_AVX_LEVELS;
327 	pd_info->pp_block_size = pd_info->pp_header.block_size;
328 
329 	/* Read PP Offset 0: Get feature offset with PP level */
330 	*((u64 *)&pd_info->feature_offsets) = readq(pd_info->sst_base +
331 						    pd_info->sst_header.pp_offset +
332 						    SST_PP_OFFSET_0);
333 
334 	perf_level_offsets = readq(pd_info->sst_base + pd_info->sst_header.pp_offset +
335 				   SST_PP_OFFSET_1);
336 
337 	for (i = 0; i < levels; ++i) {
338 		u64 offset;
339 
340 		offset = perf_level_offsets & (0xffULL << (i * SST_PP_OFFSET_SIZE));
341 		offset >>= (i * 8);
342 		offset &= 0xff;
343 		offset *= 8; /* Convert to byte from QWORD offset */
344 		pd_info->perf_levels[i].mmio_offset = pd_info->sst_header.pp_offset + offset;
345 	}
346 
347 	return 0;
348 }
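
/*
 * Editor's note: a worked (hypothetical) example of the loop above. If the
 * qword read at pp_offset + SST_PP_OFFSET_1 is 0x0000000000000403 and
 * sst_header.pp_offset is 40 bytes, then level 0 uses byte 0 (0x03) and
 * level 1 uses byte 1 (0x04):
 *
 *	perf_levels[0].mmio_offset = 40 + 0x03 * 8 = 64
 *	perf_levels[1].mmio_offset = 40 + 0x04 * 8 = 72
 *
 * i.e. each byte holds a qword offset that, once converted to bytes and added
 * to the PP bank offset, gives that level's register block offset from
 * sst_base.
 */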
349 
350 static int sst_main(struct auxiliary_device *auxdev, struct tpmi_per_power_domain_info *pd_info)
351 {
352 	int i, mask, levels;
353 
354 	*((u64 *)&pd_info->sst_header) = readq(pd_info->sst_base);
355 	pd_info->sst_header.cp_offset *= 8;
356 	pd_info->sst_header.pp_offset *= 8;
357 
358 	if (pd_info->sst_header.interface_version == TPMI_VERSION_INVALID)
359 		return -ENODEV;
360 
361 	if (TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version) != ISST_MAJOR_VERSION) {
362 		dev_err(&auxdev->dev, "SST: Unsupported major version:%lx\n",
363 			TPMI_MAJOR_VERSION(pd_info->sst_header.interface_version));
364 		return -ENODEV;
365 	}
366 
367 	if (TPMI_MINOR_VERSION(pd_info->sst_header.interface_version) != ISST_MINOR_VERSION)
368 		dev_info(&auxdev->dev, "SST: Ignore: Unsupported minor version:%lx\n",
369 			 TPMI_MINOR_VERSION(pd_info->sst_header.interface_version));
370 
371 	/* Read SST CP Header */
372 	*((u64 *)&pd_info->cp_header) = readq(pd_info->sst_base + pd_info->sst_header.cp_offset);
373 
374 	/* Read PP header */
375 	*((u64 *)&pd_info->pp_header) = readq(pd_info->sst_base + pd_info->sst_header.pp_offset);
376 
377 	mask = 0x01;
378 	levels = 0;
379 	for (i = 0; i < 8; ++i) {
380 		if (pd_info->pp_header.level_en_mask & mask)
381 			levels = i;
382 		mask <<= 1;
383 	}
384 	pd_info->max_level = levels;
385 	return sst_add_perf_profiles(auxdev, pd_info, levels + 1);
388 }
389 
390 /*
391  * Map a package and power_domain id to SST information structure unique for a power_domain.
392  * The caller should call under isst_tpmi_dev_lock.
393  */
394 static struct tpmi_per_power_domain_info *get_instance(int pkg_id, int power_domain_id)
395 {
396 	struct tpmi_per_power_domain_info *power_domain_info;
397 	struct tpmi_sst_struct *sst_inst;
398 
399 	if (pkg_id < 0 || pkg_id > isst_common.max_index ||
400 	    pkg_id >= topology_max_packages())
401 		return NULL;
402 
403 	sst_inst = isst_common.sst_inst[pkg_id];
404 	if (!sst_inst)
405 		return NULL;
406 
407 	if (power_domain_id < 0 || power_domain_id >= sst_inst->number_of_power_domains)
408 		return NULL;
409 
410 	power_domain_info = &sst_inst->power_domain_info[power_domain_id];
411 
412 	if (power_domain_info && !power_domain_info->sst_base)
413 		return NULL;
414 
415 	return power_domain_info;
416 }
417 
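/*
 * Editor's note: bit 0 of MSR_PM_ENABLE is the HWP enable bit. The helper
 * below returns true when HWP is not enabled, and the dynamic SST ioctls
 * (core power, perf level and feature changes) bail out in that case, since
 * dynamic SST reconfiguration relies on HWP being active.
 */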
418 static bool disable_dynamic_sst_features(void)
419 {
420 	u64 value;
421 
422 	rdmsrl(MSR_PM_ENABLE, value);
423 	return !(value & 0x1);
424 }
425 
426 #define _read_cp_info(name_str, name, offset, start, width, mult_factor)\
427 {\
428 	u64 val, mask;\
429 	\
430 	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
431 			(offset));\
432 	mask = GENMASK_ULL((start + width - 1), start);\
433 	val &= mask; \
434 	val >>= start;\
435 	name = (val * mult_factor);\
436 }
437 
438 #define _write_cp_info(name_str, name, offset, start, width, div_factor)\
439 {\
440 	u64 val, mask;\
441 	\
442 	val = readq(power_domain_info->sst_base +\
443 		    power_domain_info->sst_header.cp_offset + (offset));\
444 	mask = GENMASK_ULL((start + width - 1), start);\
445 	val &= ~mask;\
446 	val |= (name / div_factor) << start;\
447 	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.cp_offset +\
448 		(offset));\
449 }
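
/*
 * Editor's note: a sketch of how these macros expand, using the "cp_enable"
 * read further below as an example (start = 0, width = 1,
 * mult_factor = SST_MUL_FACTOR_NONE; field names abbreviated):
 *
 *	val = readq(sst_base + cp_offset + SST_CP_STATUS_OFFSET);
 *	val &= GENMASK_ULL(0, 0);
 *	val >>= 0;
 *	core_power.enable = val * 1;
 *
 * The write variant does the reverse: a read-modify-write of the same bit
 * field, dividing the caller-supplied value by div_factor before inserting it.
 */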
450 
451 #define	SST_CP_CONTROL_OFFSET	8
452 #define	SST_CP_STATUS_OFFSET	16
453 
454 #define SST_CP_ENABLE_START		0
455 #define SST_CP_ENABLE_WIDTH		1
456 
457 #define SST_CP_PRIORITY_TYPE_START	1
458 #define SST_CP_PRIORITY_TYPE_WIDTH	1
459 
460 static long isst_if_core_power_state(void __user *argp)
461 {
462 	struct tpmi_per_power_domain_info *power_domain_info;
463 	struct isst_core_power core_power;
464 
465 	if (disable_dynamic_sst_features())
466 		return -EFAULT;
467 
468 	if (copy_from_user(&core_power, argp, sizeof(core_power)))
469 		return -EFAULT;
470 
471 	power_domain_info = get_instance(core_power.socket_id, core_power.power_domain_id);
472 	if (!power_domain_info)
473 		return -EINVAL;
474 
475 	if (core_power.get_set) {
476 		_write_cp_info("cp_enable", core_power.enable, SST_CP_CONTROL_OFFSET,
477 			       SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
478 		_write_cp_info("cp_prio_type", core_power.priority_type, SST_CP_CONTROL_OFFSET,
479 			       SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
480 			       SST_MUL_FACTOR_NONE)
481 	} else {
482 		/* get */
483 		_read_cp_info("cp_enable", core_power.enable, SST_CP_STATUS_OFFSET,
484 			      SST_CP_ENABLE_START, SST_CP_ENABLE_WIDTH, SST_MUL_FACTOR_NONE)
485 		_read_cp_info("cp_prio_type", core_power.priority_type, SST_CP_STATUS_OFFSET,
486 			      SST_CP_PRIORITY_TYPE_START, SST_CP_PRIORITY_TYPE_WIDTH,
487 			      SST_MUL_FACTOR_NONE)
488 		core_power.supported = !!(power_domain_info->sst_header.cap_mask & BIT(0));
489 		if (copy_to_user(argp, &core_power, sizeof(core_power)))
490 			return -EFAULT;
491 	}
492 
493 	return 0;
494 }
495 
496 #define SST_CLOS_CONFIG_0_OFFSET	24
497 
498 #define SST_CLOS_CONFIG_PRIO_START	4
499 #define SST_CLOS_CONFIG_PRIO_WIDTH	4
500 
501 #define SST_CLOS_CONFIG_MIN_START	8
502 #define SST_CLOS_CONFIG_MIN_WIDTH	8
503 
504 #define SST_CLOS_CONFIG_MAX_START	16
505 #define SST_CLOS_CONFIG_MAX_WIDTH	8
506 
507 static long isst_if_clos_param(void __user *argp)
508 {
509 	struct tpmi_per_power_domain_info *power_domain_info;
510 	struct isst_clos_param clos_param;
511 
512 	if (copy_from_user(&clos_param, argp, sizeof(clos_param)))
513 		return -EFAULT;
514 
515 	power_domain_info = get_instance(clos_param.socket_id, clos_param.power_domain_id);
516 	if (!power_domain_info)
517 		return -EINVAL;
518 
519 	if (clos_param.get_set) {
520 		if (power_domain_info->write_blocked)
521 			return -EPERM;
522 
523 		_write_cp_info("clos.min_freq", clos_param.min_freq_mhz,
524 			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
525 			       SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
526 			       SST_MUL_FACTOR_FREQ);
527 		_write_cp_info("clos.max_freq", clos_param.max_freq_mhz,
528 			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
529 			       SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
530 			       SST_MUL_FACTOR_FREQ);
531 		_write_cp_info("clos.prio", clos_param.prop_prio,
532 			       (SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
533 			       SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
534 			       SST_MUL_FACTOR_NONE);
535 	} else {
536 		/* get */
537 		_read_cp_info("clos.min_freq", clos_param.min_freq_mhz,
538 				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
539 				SST_CLOS_CONFIG_MIN_START, SST_CLOS_CONFIG_MIN_WIDTH,
540 				SST_MUL_FACTOR_FREQ)
541 		_read_cp_info("clos.max_freq", clos_param.max_freq_mhz,
542 				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
543 				SST_CLOS_CONFIG_MAX_START, SST_CLOS_CONFIG_MAX_WIDTH,
544 				SST_MUL_FACTOR_FREQ)
545 		_read_cp_info("clos.prio", clos_param.prop_prio,
546 				(SST_CLOS_CONFIG_0_OFFSET + clos_param.clos * SST_REG_SIZE),
547 				SST_CLOS_CONFIG_PRIO_START, SST_CLOS_CONFIG_PRIO_WIDTH,
548 				SST_MUL_FACTOR_NONE)
549 
550 		if (copy_to_user(argp, &clos_param, sizeof(clos_param)))
551 			return -EFAULT;
552 	}
553 
554 	return 0;
555 }
556 
557 #define SST_CLOS_ASSOC_0_OFFSET		56
558 #define SST_CLOS_ASSOC_CPUS_PER_REG	16
559 #define SST_CLOS_ASSOC_BITS_PER_CPU	4
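
/*
 * Editor's note: CLOS association registers pack 16 CPUs per qword, 4 bits
 * per CPU. A worked example of the offset/shift math below for
 * punit_cpu_no = 21:
 *
 *	offset = SST_CLOS_ASSOC_0_OFFSET + (21 / 16) * SST_REG_SIZE = 56 + 8 = 64
 *	shift  = (21 % 16) * SST_CLOS_ASSOC_BITS_PER_CPU = 5 * 4 = 20
 *
 * so CPU 21's CLOS id occupies bits [23:20] of the second association
 * register.
 */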
560 
561 static long isst_if_clos_assoc(void __user *argp)
562 {
563 	struct isst_if_clos_assoc_cmds assoc_cmds;
564 	unsigned char __user *ptr;
565 	int i;
566 
567 	/* Each multi command has u16 command count as the first field */
568 	if (copy_from_user(&assoc_cmds, argp, sizeof(assoc_cmds)))
569 		return -EFAULT;
570 
571 	if (!assoc_cmds.cmd_count || assoc_cmds.cmd_count > ISST_IF_CMD_LIMIT)
572 		return -EINVAL;
573 
574 	ptr = argp + offsetof(struct isst_if_clos_assoc_cmds, assoc_info);
575 	for (i = 0; i < assoc_cmds.cmd_count; ++i) {
576 		struct tpmi_per_power_domain_info *power_domain_info;
577 		struct isst_if_clos_assoc clos_assoc;
578 		int punit_id, punit_cpu_no, pkg_id;
579 		struct tpmi_sst_struct *sst_inst;
580 		int offset, shift, cpu;
581 		u64 val, mask, clos;
582 
583 		if (copy_from_user(&clos_assoc, ptr, sizeof(clos_assoc)))
584 			return -EFAULT;
585 
586 		if (clos_assoc.socket_id >= topology_max_packages())
587 			return -EINVAL;
588 
589 		cpu = clos_assoc.logical_cpu;
590 		clos = clos_assoc.clos;
591 
592 		if (assoc_cmds.punit_cpu_map)
593 			punit_cpu_no = cpu;
594 		else
595 			return -EOPNOTSUPP;
596 
597 		if (punit_cpu_no < 0)
598 			return -EINVAL;
599 
600 		punit_id = clos_assoc.power_domain_id;
601 		pkg_id = clos_assoc.socket_id;
602 
603 		sst_inst = isst_common.sst_inst[pkg_id];
604 
605 		if (clos_assoc.power_domain_id >= sst_inst->number_of_power_domains)
606 			return -EINVAL;
607 
608 		power_domain_info = &sst_inst->power_domain_info[punit_id];
609 
610 		if (assoc_cmds.get_set && power_domain_info->write_blocked)
611 			return -EPERM;
612 
613 		offset = SST_CLOS_ASSOC_0_OFFSET +
614 				(punit_cpu_no / SST_CLOS_ASSOC_CPUS_PER_REG) * SST_REG_SIZE;
615 		shift = punit_cpu_no % SST_CLOS_ASSOC_CPUS_PER_REG;
616 		shift *= SST_CLOS_ASSOC_BITS_PER_CPU;
617 
618 		val = readq(power_domain_info->sst_base +
619 				power_domain_info->sst_header.cp_offset + offset);
620 		if (assoc_cmds.get_set) {
621 			mask = GENMASK_ULL((shift + SST_CLOS_ASSOC_BITS_PER_CPU - 1), shift);
622 			val &= ~mask;
623 			val |= (clos << shift);
624 			writeq(val, power_domain_info->sst_base +
625 					power_domain_info->sst_header.cp_offset + offset);
626 		} else {
627 			val >>= shift;
628 			clos_assoc.clos = val & GENMASK(SST_CLOS_ASSOC_BITS_PER_CPU - 1, 0);
629 			if (copy_to_user(ptr, &clos_assoc, sizeof(clos_assoc)))
630 				return -EFAULT;
631 		}
632 
633 		ptr += sizeof(clos_assoc);
634 	}
635 
636 	return 0;
637 }
638 
639 #define _read_pp_info(name_str, name, offset, start, width, mult_factor)\
640 {\
641 	u64 val, _mask;\
642 	\
643 	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
644 		    (offset));\
645 	_mask = GENMASK_ULL((start + width - 1), start);\
646 	val &= _mask;\
647 	val >>= start;\
648 	name = (val * mult_factor);\
649 }
650 
651 #define _write_pp_info(name_str, name, offset, start, width, div_factor)\
652 {\
653 	u64 val, _mask;\
654 	\
655 	val = readq(power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
656 		    (offset));\
657 	_mask = GENMASK((start + width - 1), start);\
658 	val &= ~_mask;\
659 	val |= (name / div_factor) << start;\
660 	writeq(val, power_domain_info->sst_base + power_domain_info->sst_header.pp_offset +\
661 	      (offset));\
662 }
663 
664 #define _read_bf_level_info(name_str, name, level, offset, start, width, mult_factor)\
665 {\
666 	u64 val, _mask;\
667 	\
668 	val = readq(power_domain_info->sst_base +\
669 		    power_domain_info->perf_levels[level].mmio_offset +\
670 		(power_domain_info->feature_offsets.bf_offset * 8) + (offset));\
671 	_mask = GENMASK_ULL((start + width - 1), start);\
672 	val &= _mask; \
673 	val >>= start;\
674 	name = (val * mult_factor);\
675 }
676 
677 #define _read_tf_level_info(name_str, name, level, offset, start, width, mult_factor)\
678 {\
679 	u64 val, _mask;\
680 	\
681 	val = readq(power_domain_info->sst_base +\
682 		    power_domain_info->perf_levels[level].mmio_offset +\
683 		(power_domain_info->feature_offsets.tf_offset * 8) + (offset));\
684 	_mask = GENMASK_ULL((start + width - 1), start);\
685 	val &= _mask; \
686 	val >>= start;\
687 	name = (val * mult_factor);\
688 }
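
/*
 * Editor's note: in the two *_level_info macros above and the
 * _read_pp_level_info macro further below, feature_offsets.{pp,bf,tf}_offset
 * come from the PP header block as qword offsets within a perf level, hence
 * the "* 8" to convert them to byte offsets before adding the register offset
 * within that feature's bank.
 */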
689 
690 #define SST_PP_STATUS_OFFSET	32
691 
692 #define SST_PP_LEVEL_START	0
693 #define SST_PP_LEVEL_WIDTH	3
694 
695 #define SST_PP_LOCK_START	3
696 #define SST_PP_LOCK_WIDTH	1
697 
698 #define SST_PP_FEATURE_STATE_START	8
699 #define SST_PP_FEATURE_STATE_WIDTH	8
700 
701 #define SST_BF_FEATURE_SUPPORTED_START	12
702 #define SST_BF_FEATURE_SUPPORTED_WIDTH	1
703 
704 #define SST_TF_FEATURE_SUPPORTED_START	12
705 #define SST_TF_FEATURE_SUPPORTED_WIDTH	1
706 
707 static int isst_if_get_perf_level(void __user *argp)
708 {
709 	struct isst_perf_level_info perf_level;
710 	struct tpmi_per_power_domain_info *power_domain_info;
711 
712 	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
713 		return -EFAULT;
714 
715 	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
716 	if (!power_domain_info)
717 		return -EINVAL;
718 
719 	perf_level.max_level = power_domain_info->max_level;
720 	perf_level.level_mask = power_domain_info->pp_header.level_en_mask;
721 	perf_level.feature_rev = power_domain_info->pp_header.feature_rev;
722 	_read_pp_info("current_level", perf_level.current_level, SST_PP_STATUS_OFFSET,
723 		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
724 	_read_pp_info("locked", perf_level.locked, SST_PP_STATUS_OFFSET,
725 		      SST_PP_LOCK_START, SST_PP_LOCK_WIDTH, SST_MUL_FACTOR_NONE)
726 	_read_pp_info("feature_state", perf_level.feature_state, SST_PP_STATUS_OFFSET,
727 		      SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH, SST_MUL_FACTOR_NONE)
728 	perf_level.enabled = !!(power_domain_info->sst_header.cap_mask & BIT(1));
729 
730 	_read_bf_level_info("bf_support", perf_level.sst_bf_support, 0, 0,
731 			    SST_BF_FEATURE_SUPPORTED_START, SST_BF_FEATURE_SUPPORTED_WIDTH,
732 			    SST_MUL_FACTOR_NONE);
733 	_read_tf_level_info("tf_support", perf_level.sst_tf_support, 0, 0,
734 			    SST_TF_FEATURE_SUPPORTED_START, SST_TF_FEATURE_SUPPORTED_WIDTH,
735 			    SST_MUL_FACTOR_NONE);
736 
737 	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
738 		return -EFAULT;
739 
740 	return 0;
741 }
742 
743 #define SST_PP_CONTROL_OFFSET		24
744 #define SST_PP_LEVEL_CHANGE_TIME_MS	5
745 #define SST_PP_LEVEL_CHANGE_RETRY_COUNT	3
746 
747 static int isst_if_set_perf_level(void __user *argp)
748 {
749 	struct isst_perf_level_control perf_level;
750 	struct tpmi_per_power_domain_info *power_domain_info;
751 	int level, retry = 0;
752 
753 	if (disable_dynamic_sst_features())
754 		return -EFAULT;
755 
756 	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
757 		return -EFAULT;
758 
759 	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
760 	if (!power_domain_info)
761 		return -EINVAL;
762 
763 	if (power_domain_info->write_blocked)
764 		return -EPERM;
765 
766 	if (!(power_domain_info->pp_header.allowed_level_mask & BIT(perf_level.level)))
767 		return -EINVAL;
768 
769 	_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
770 		      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
771 
772 	/* If the requested new level is the same as the current level, reject */
773 	if (perf_level.level == level)
774 		return -EINVAL;
775 
776 	_write_pp_info("perf_level", perf_level.level, SST_PP_CONTROL_OFFSET,
777 		       SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
778 
779 	/* It is possible that firmware is busy (although unlikely), so retry */
780 	do {
781 		/* Give time to FW to process */
782 		msleep(SST_PP_LEVEL_CHANGE_TIME_MS);
783 
784 		_read_pp_info("current_level", level, SST_PP_STATUS_OFFSET,
785 			      SST_PP_LEVEL_START, SST_PP_LEVEL_WIDTH, SST_MUL_FACTOR_NONE)
786 
787 		/* Check if the new level is active */
788 		if (perf_level.level == level)
789 			break;
790 
791 	} while (retry++ < SST_PP_LEVEL_CHANGE_RETRY_COUNT);
792 
793 	/* If the level change didn't happen, return fault */
794 	if (perf_level.level != level)
795 		return -EFAULT;
796 
797 	/* Reset the feature state on level change */
798 	_write_pp_info("perf_feature", 0, SST_PP_CONTROL_OFFSET,
799 		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
800 		       SST_MUL_FACTOR_NONE)
801 
802 	/* Give time to FW to process */
803 	msleep(SST_PP_LEVEL_CHANGE_TIME_MS);
804 
805 	return 0;
806 }
807 
808 static int isst_if_set_perf_feature(void __user *argp)
809 {
810 	struct isst_perf_feature_control perf_feature;
811 	struct tpmi_per_power_domain_info *power_domain_info;
812 
813 	if (disable_dynamic_sst_features())
814 		return -EFAULT;
815 
816 	if (copy_from_user(&perf_feature, argp, sizeof(perf_feature)))
817 		return -EFAULT;
818 
819 	power_domain_info = get_instance(perf_feature.socket_id, perf_feature.power_domain_id);
820 	if (!power_domain_info)
821 		return -EINVAL;
822 
823 	if (power_domain_info->write_blocked)
824 		return -EPERM;
825 
826 	_write_pp_info("perf_feature", perf_feature.feature, SST_PP_CONTROL_OFFSET,
827 		       SST_PP_FEATURE_STATE_START, SST_PP_FEATURE_STATE_WIDTH,
828 		       SST_MUL_FACTOR_NONE)
829 
830 	return 0;
831 }
832 
833 #define _read_pp_level_info(name_str, name, level, offset, start, width, mult_factor)\
834 {\
835 	u64 val, _mask;\
836 	\
837 	val = readq(power_domain_info->sst_base +\
838 		    power_domain_info->perf_levels[level].mmio_offset +\
839 		(power_domain_info->feature_offsets.pp_offset * 8) + (offset));\
840 	_mask = GENMASK_ULL((start + width - 1), start);\
841 	val &= _mask; \
842 	val >>= start;\
843 	name = (val * mult_factor);\
844 }
845 
846 #define SST_PP_INFO_0_OFFSET	0
847 #define SST_PP_INFO_1_OFFSET	8
848 #define SST_PP_INFO_2_OFFSET	16
849 #define SST_PP_INFO_3_OFFSET	24
850 
851 /* SST_PP_INFO_4_OFFSET to SST_PP_INFO_9_OFFSET are trl levels */
852 #define SST_PP_INFO_4_OFFSET	32
853 
854 #define SST_PP_INFO_10_OFFSET	80
855 #define SST_PP_INFO_11_OFFSET	88
856 
857 #define SST_PP_P1_SSE_START	0
858 #define SST_PP_P1_SSE_WIDTH	8
859 
860 #define SST_PP_P1_AVX2_START	8
861 #define SST_PP_P1_AVX2_WIDTH	8
862 
863 #define SST_PP_P1_AVX512_START	16
864 #define SST_PP_P1_AVX512_WIDTH	8
865 
866 #define SST_PP_P1_AMX_START	24
867 #define SST_PP_P1_AMX_WIDTH	8
868 
869 #define SST_PP_TDP_START	32
870 #define SST_PP_TDP_WIDTH	15
871 
872 #define SST_PP_T_PROCHOT_START	47
873 #define SST_PP_T_PROCHOT_WIDTH	8
874 
875 #define SST_PP_MAX_MEMORY_FREQ_START	55
876 #define SST_PP_MAX_MEMORY_FREQ_WIDTH	7
877 
878 #define SST_PP_COOLING_TYPE_START	62
879 #define SST_PP_COOLING_TYPE_WIDTH	2
880 
881 #define SST_PP_TRL_0_RATIO_0_START	0
882 #define SST_PP_TRL_0_RATIO_0_WIDTH	8
883 
884 #define SST_PP_TRL_CORES_BUCKET_0_START	0
885 #define SST_PP_TRL_CORES_BUCKET_0_WIDTH	8
886 
887 #define SST_PP_CORE_RATIO_P0_START	0
888 #define SST_PP_CORE_RATIO_P0_WIDTH	8
889 
890 #define SST_PP_CORE_RATIO_P1_START	8
891 #define SST_PP_CORE_RATIO_P1_WIDTH	8
892 
893 #define SST_PP_CORE_RATIO_PN_START	16
894 #define SST_PP_CORE_RATIO_PN_WIDTH	8
895 
896 #define SST_PP_CORE_RATIO_PM_START	24
897 #define SST_PP_CORE_RATIO_PM_WIDTH	8
898 
899 #define SST_PP_CORE_RATIO_P0_FABRIC_START	32
900 #define SST_PP_CORE_RATIO_P0_FABRIC_WIDTH	8
901 
902 #define SST_PP_CORE_RATIO_P1_FABRIC_START	40
903 #define SST_PP_CORE_RATIO_P1_FABRIC_WIDTH	8
904 
905 #define SST_PP_CORE_RATIO_PM_FABRIC_START	48
906 #define SST_PP_CORE_RATIO_PM_FABRIC_WIDTH	8
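
/*
 * Editor's note: the *_RATIO_* fields above are frequency ratios, not
 * frequencies. With the 100 MHz ratio unit implied by SST_MUL_FACTOR_FREQ, a
 * raw ratio of, say, 32 read from SST_PP_INFO_11 decodes as
 * 32 * 100 = 3200 MHz, which is how the _read_pp_level_info() users below
 * fill in the *_freq_mhz fields.
 */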
907 
908 static int isst_if_get_perf_level_info(void __user *argp)
909 {
910 	struct isst_perf_level_data_info perf_level;
911 	struct tpmi_per_power_domain_info *power_domain_info;
912 	int i, j;
913 
914 	if (copy_from_user(&perf_level, argp, sizeof(perf_level)))
915 		return -EFAULT;
916 
917 	power_domain_info = get_instance(perf_level.socket_id, perf_level.power_domain_id);
918 	if (!power_domain_info)
919 		return -EINVAL;
920 
921 	if (perf_level.level > power_domain_info->max_level)
922 		return -EINVAL;
923 
924 	if (!(power_domain_info->pp_header.level_en_mask & BIT(perf_level.level)))
925 		return -EINVAL;
926 
927 	_read_pp_level_info("tdp_ratio", perf_level.tdp_ratio, perf_level.level,
928 			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
929 			    SST_MUL_FACTOR_NONE)
930 	_read_pp_level_info("base_freq_mhz", perf_level.base_freq_mhz, perf_level.level,
931 			    SST_PP_INFO_0_OFFSET, SST_PP_P1_SSE_START, SST_PP_P1_SSE_WIDTH,
932 			    SST_MUL_FACTOR_FREQ)
933 	_read_pp_level_info("base_freq_avx2_mhz", perf_level.base_freq_avx2_mhz, perf_level.level,
934 			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX2_START, SST_PP_P1_AVX2_WIDTH,
935 			    SST_MUL_FACTOR_FREQ)
936 	_read_pp_level_info("base_freq_avx512_mhz", perf_level.base_freq_avx512_mhz,
937 			    perf_level.level, SST_PP_INFO_0_OFFSET, SST_PP_P1_AVX512_START,
938 			    SST_PP_P1_AVX512_WIDTH, SST_MUL_FACTOR_FREQ)
939 	_read_pp_level_info("base_freq_amx_mhz", perf_level.base_freq_amx_mhz, perf_level.level,
940 			    SST_PP_INFO_0_OFFSET, SST_PP_P1_AMX_START, SST_PP_P1_AMX_WIDTH,
941 			    SST_MUL_FACTOR_FREQ)
942 
943 	_read_pp_level_info("thermal_design_power_w", perf_level.thermal_design_power_w,
944 			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_TDP_START,
945 			    SST_PP_TDP_WIDTH, SST_MUL_FACTOR_NONE)
946 	perf_level.thermal_design_power_w /= 8; /* units are in 1/8th watt */
947 	_read_pp_level_info("tjunction_max_c", perf_level.tjunction_max_c, perf_level.level,
948 			    SST_PP_INFO_1_OFFSET, SST_PP_T_PROCHOT_START, SST_PP_T_PROCHOT_WIDTH,
949 			    SST_MUL_FACTOR_NONE)
950 	_read_pp_level_info("max_memory_freq_mhz", perf_level.max_memory_freq_mhz,
951 			    perf_level.level, SST_PP_INFO_1_OFFSET, SST_PP_MAX_MEMORY_FREQ_START,
952 			    SST_PP_MAX_MEMORY_FREQ_WIDTH, SST_MUL_FACTOR_FREQ)
953 	_read_pp_level_info("cooling_type", perf_level.cooling_type, perf_level.level,
954 			    SST_PP_INFO_1_OFFSET, SST_PP_COOLING_TYPE_START,
955 			    SST_PP_COOLING_TYPE_WIDTH, SST_MUL_FACTOR_NONE)
956 
957 	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
958 		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
959 			_read_pp_level_info("trl*_bucket*_freq_mhz",
960 					    perf_level.trl_freq_mhz[i][j], perf_level.level,
961 					    SST_PP_INFO_4_OFFSET + (i * SST_PP_TRL_0_RATIO_0_WIDTH),
962 					    j * SST_PP_TRL_0_RATIO_0_WIDTH,
963 					    SST_PP_TRL_0_RATIO_0_WIDTH,
964 					    SST_MUL_FACTOR_FREQ);
965 	}
966 
967 	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
968 		_read_pp_level_info("bucket*_core_count", perf_level.bucket_core_counts[i],
969 				    perf_level.level, SST_PP_INFO_10_OFFSET,
970 				    SST_PP_TRL_CORES_BUCKET_0_WIDTH * i,
971 				    SST_PP_TRL_CORES_BUCKET_0_WIDTH, SST_MUL_FACTOR_NONE)
972 
973 	perf_level.max_buckets = TRL_MAX_BUCKETS;
974 	perf_level.max_trl_levels = TRL_MAX_LEVELS;
975 
976 	_read_pp_level_info("p0_freq_mhz", perf_level.p0_freq_mhz, perf_level.level,
977 			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P0_START,
978 			    SST_PP_CORE_RATIO_P0_WIDTH, SST_MUL_FACTOR_FREQ)
979 	_read_pp_level_info("p1_freq_mhz", perf_level.p1_freq_mhz, perf_level.level,
980 			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_P1_START,
981 			    SST_PP_CORE_RATIO_P1_WIDTH, SST_MUL_FACTOR_FREQ)
982 	_read_pp_level_info("pn_freq_mhz", perf_level.pn_freq_mhz, perf_level.level,
983 			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PN_START,
984 			    SST_PP_CORE_RATIO_PN_WIDTH, SST_MUL_FACTOR_FREQ)
985 	_read_pp_level_info("pm_freq_mhz", perf_level.pm_freq_mhz, perf_level.level,
986 			    SST_PP_INFO_11_OFFSET, SST_PP_CORE_RATIO_PM_START,
987 			    SST_PP_CORE_RATIO_PM_WIDTH, SST_MUL_FACTOR_FREQ)
988 	_read_pp_level_info("p0_fabric_freq_mhz", perf_level.p0_fabric_freq_mhz,
989 			    perf_level.level, SST_PP_INFO_11_OFFSET,
990 			    SST_PP_CORE_RATIO_P0_FABRIC_START,
991 			    SST_PP_CORE_RATIO_P0_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
992 	_read_pp_level_info("p1_fabric_freq_mhz", perf_level.p1_fabric_freq_mhz,
993 			    perf_level.level, SST_PP_INFO_11_OFFSET,
994 			    SST_PP_CORE_RATIO_P1_FABRIC_START,
995 			    SST_PP_CORE_RATIO_P1_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
996 	_read_pp_level_info("pm_fabric_freq_mhz", perf_level.pm_fabric_freq_mhz,
997 			    perf_level.level, SST_PP_INFO_11_OFFSET,
998 			    SST_PP_CORE_RATIO_PM_FABRIC_START,
999 			    SST_PP_CORE_RATIO_PM_FABRIC_WIDTH, SST_MUL_FACTOR_FREQ)
1000 
1001 	if (copy_to_user(argp, &perf_level, sizeof(perf_level)))
1002 		return -EFAULT;
1003 
1004 	return 0;
1005 }
1006 
1007 #define SST_PP_FUSED_CORE_COUNT_START	0
1008 #define SST_PP_FUSED_CORE_COUNT_WIDTH	8
1009 
1010 #define SST_PP_RSLVD_CORE_COUNT_START	8
1011 #define SST_PP_RSLVD_CORE_COUNT_WIDTH	8
1012 
1013 #define SST_PP_RSLVD_CORE_MASK_START	0
1014 #define SST_PP_RSLVD_CORE_MASK_WIDTH	64
1015 
1016 static int isst_if_get_perf_level_mask(void __user *argp)
1017 {
1018 	static struct isst_perf_level_cpu_mask cpumask;
1019 	struct tpmi_per_power_domain_info *power_domain_info;
1020 	u64 mask;
1021 
1022 	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
1023 		return -EFAULT;
1024 
1025 	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
1026 	if (!power_domain_info)
1027 		return -EINVAL;
1028 
1029 	_read_pp_level_info("mask", mask, cpumask.level, SST_PP_INFO_2_OFFSET,
1030 			    SST_PP_RSLVD_CORE_MASK_START, SST_PP_RSLVD_CORE_MASK_WIDTH,
1031 			    SST_MUL_FACTOR_NONE)
1032 
1033 	cpumask.mask = mask;
1034 
1035 	if (!cpumask.punit_cpu_map)
1036 		return -EOPNOTSUPP;
1037 
1038 	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
1039 		return -EFAULT;
1040 
1041 	return 0;
1042 }
1043 
1044 #define SST_BF_INFO_0_OFFSET	0
1045 #define SST_BF_INFO_1_OFFSET	8
1046 
1047 #define SST_BF_P1_HIGH_START	13
1048 #define SST_BF_P1_HIGH_WIDTH	8
1049 
1050 #define SST_BF_P1_LOW_START	21
1051 #define SST_BF_P1_LOW_WIDTH	8
1052 
1053 #define SST_BF_T_PROHOT_START	38
1054 #define SST_BF_T_PROHOT_WIDTH	8
1055 
1056 #define SST_BF_TDP_START	46
1057 #define SST_BF_TDP_WIDTH	15
1058 
1059 static int isst_if_get_base_freq_info(void __user *argp)
1060 {
1061 	static struct isst_base_freq_info base_freq;
1062 	struct tpmi_per_power_domain_info *power_domain_info;
1063 
1064 	if (copy_from_user(&base_freq, argp, sizeof(base_freq)))
1065 		return -EFAULT;
1066 
1067 	power_domain_info = get_instance(base_freq.socket_id, base_freq.power_domain_id);
1068 	if (!power_domain_info)
1069 		return -EINVAL;
1070 
1071 	if (base_freq.level > power_domain_info->max_level)
1072 		return -EINVAL;
1073 
1074 	_read_bf_level_info("p1_high", base_freq.high_base_freq_mhz, base_freq.level,
1075 			    SST_BF_INFO_0_OFFSET, SST_BF_P1_HIGH_START, SST_BF_P1_HIGH_WIDTH,
1076 			    SST_MUL_FACTOR_FREQ)
1077 	_read_bf_level_info("p1_low", base_freq.low_base_freq_mhz, base_freq.level,
1078 			    SST_BF_INFO_0_OFFSET, SST_BF_P1_LOW_START, SST_BF_P1_LOW_WIDTH,
1079 			    SST_MUL_FACTOR_FREQ)
1080 	_read_bf_level_info("BF-TJ", base_freq.tjunction_max_c, base_freq.level,
1081 			    SST_BF_INFO_0_OFFSET, SST_BF_T_PROHOT_START, SST_BF_T_PROHOT_WIDTH,
1082 			    SST_MUL_FACTOR_NONE)
1083 	_read_bf_level_info("BF-tdp", base_freq.thermal_design_power_w, base_freq.level,
1084 			    SST_BF_INFO_0_OFFSET, SST_BF_TDP_START, SST_BF_TDP_WIDTH,
1085 			    SST_MUL_FACTOR_NONE)
1086 	base_freq.thermal_design_power_w /= 8; /*unit = 1/8th watt*/
1087 
1088 	if (copy_to_user(argp, &base_freq, sizeof(base_freq)))
1089 		return -EFAULT;
1090 
1091 	return 0;
1092 }
1093 
1094 #define P1_HI_CORE_MASK_START	0
1095 #define P1_HI_CORE_MASK_WIDTH	64
1096 
1097 static int isst_if_get_base_freq_mask(void __user *argp)
1098 {
1099 	static struct isst_perf_level_cpu_mask cpumask;
1100 	struct tpmi_per_power_domain_info *power_domain_info;
1101 	u64 mask;
1102 
1103 	if (copy_from_user(&cpumask, argp, sizeof(cpumask)))
1104 		return -EFAULT;
1105 
1106 	power_domain_info = get_instance(cpumask.socket_id, cpumask.power_domain_id);
1107 	if (!power_domain_info)
1108 		return -EINVAL;
1109 
1110 	_read_bf_level_info("BF-cpumask", mask, cpumask.level, SST_BF_INFO_1_OFFSET,
1111 			    P1_HI_CORE_MASK_START, P1_HI_CORE_MASK_WIDTH,
1112 			    SST_MUL_FACTOR_NONE)
1113 
1114 	cpumask.mask = mask;
1115 
1116 	if (!cpumask.punit_cpu_map)
1117 		return -EOPNOTSUPP;
1118 
1119 	if (copy_to_user(argp, &cpumask, sizeof(cpumask)))
1120 		return -EFAULT;
1121 
1122 	return 0;
1123 }
1124 
1125 static int isst_if_get_tpmi_instance_count(void __user *argp)
1126 {
1127 	struct isst_tpmi_instance_count tpmi_inst;
1128 	struct tpmi_sst_struct *sst_inst;
1129 	int i;
1130 
1131 	if (copy_from_user(&tpmi_inst, argp, sizeof(tpmi_inst)))
1132 		return -EFAULT;
1133 
1134 	if (tpmi_inst.socket_id >= topology_max_packages())
1135 		return -EINVAL;
1136 
1137 	tpmi_inst.count = isst_common.sst_inst[tpmi_inst.socket_id]->number_of_power_domains;
1138 
1139 	sst_inst = isst_common.sst_inst[tpmi_inst.socket_id];
1140 	tpmi_inst.valid_mask = 0;
1141 	for (i = 0; i < sst_inst->number_of_power_domains; ++i) {
1142 		struct tpmi_per_power_domain_info *pd_info;
1143 
1144 		pd_info = &sst_inst->power_domain_info[i];
1145 		if (pd_info->sst_base)
1146 			tpmi_inst.valid_mask |= BIT(i);
1147 	}
1148 
1149 	if (copy_to_user(argp, &tpmi_inst, sizeof(tpmi_inst)))
1150 		return -EFAULT;
1151 
1152 	return 0;
1153 }
1154 
1155 #define SST_TF_INFO_0_OFFSET	0
1156 #define SST_TF_INFO_1_OFFSET	8
1157 #define SST_TF_INFO_2_OFFSET	16
1158 
1159 #define SST_TF_MAX_LP_CLIP_RATIOS	TRL_MAX_LEVELS
1160 
1161 #define SST_TF_LP_CLIP_RATIO_0_START	16
1162 #define SST_TF_LP_CLIP_RATIO_0_WIDTH	8
1163 
1164 #define SST_TF_RATIO_0_START	0
1165 #define SST_TF_RATIO_0_WIDTH	8
1166 
1167 #define SST_TF_NUM_CORE_0_START 0
1168 #define SST_TF_NUM_CORE_0_WIDTH 8
1169 
1170 static int isst_if_get_turbo_freq_info(void __user *argp)
1171 {
1172 	static struct isst_turbo_freq_info turbo_freq;
1173 	struct tpmi_per_power_domain_info *power_domain_info;
1174 	int i, j;
1175 
1176 	if (copy_from_user(&turbo_freq, argp, sizeof(turbo_freq)))
1177 		return -EFAULT;
1178 
1179 	power_domain_info = get_instance(turbo_freq.socket_id, turbo_freq.power_domain_id);
1180 	if (!power_domain_info)
1181 		return -EINVAL;
1182 
1183 	if (turbo_freq.level > power_domain_info->max_level)
1184 		return -EINVAL;
1185 
1186 	turbo_freq.max_buckets = TRL_MAX_BUCKETS;
1187 	turbo_freq.max_trl_levels = TRL_MAX_LEVELS;
1188 	turbo_freq.max_clip_freqs = SST_TF_MAX_LP_CLIP_RATIOS;
1189 
1190 	for (i = 0; i < turbo_freq.max_clip_freqs; ++i)
1191 		_read_tf_level_info("lp_clip*", turbo_freq.lp_clip_freq_mhz[i],
1192 				    turbo_freq.level, SST_TF_INFO_0_OFFSET,
1193 				    SST_TF_LP_CLIP_RATIO_0_START +
1194 				    (i * SST_TF_LP_CLIP_RATIO_0_WIDTH),
1195 				    SST_TF_LP_CLIP_RATIO_0_WIDTH, SST_MUL_FACTOR_FREQ)
1196 
1197 	for (i = 0; i < TRL_MAX_LEVELS; ++i) {
1198 		for (j = 0; j < TRL_MAX_BUCKETS; ++j)
1199 			_read_tf_level_info("cydn*_bucket_*_trl",
1200 					    turbo_freq.trl_freq_mhz[i][j], turbo_freq.level,
1201 					    SST_TF_INFO_2_OFFSET + (i * SST_TF_RATIO_0_WIDTH),
1202 					    j * SST_TF_RATIO_0_WIDTH, SST_TF_RATIO_0_WIDTH,
1203 					    SST_MUL_FACTOR_FREQ)
1204 	}
1205 
1206 	for (i = 0; i < TRL_MAX_BUCKETS; ++i)
1207 		_read_tf_level_info("bucket_*_core_count", turbo_freq.bucket_core_counts[i],
1208 				    turbo_freq.level, SST_TF_INFO_1_OFFSET,
1209 				    SST_TF_NUM_CORE_0_WIDTH * i, SST_TF_NUM_CORE_0_WIDTH,
1210 				    SST_MUL_FACTOR_NONE)
1211 
1212 	if (copy_to_user(argp, &turbo_freq, sizeof(turbo_freq)))
1213 		return -EFAULT;
1214 
1215 	return 0;
1216 }
1217 
1218 static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
1219 			      unsigned long arg)
1220 {
1221 	void __user *argp = (void __user *)arg;
1222 	long ret = -ENOTTY;
1223 
1224 	mutex_lock(&isst_tpmi_dev_lock);
1225 	switch (cmd) {
1226 	case ISST_IF_COUNT_TPMI_INSTANCES:
1227 		ret = isst_if_get_tpmi_instance_count(argp);
1228 		break;
1229 	case ISST_IF_CORE_POWER_STATE:
1230 		ret = isst_if_core_power_state(argp);
1231 		break;
1232 	case ISST_IF_CLOS_PARAM:
1233 		ret = isst_if_clos_param(argp);
1234 		break;
1235 	case ISST_IF_CLOS_ASSOC:
1236 		ret = isst_if_clos_assoc(argp);
1237 		break;
1238 	case ISST_IF_PERF_LEVELS:
1239 		ret = isst_if_get_perf_level(argp);
1240 		break;
1241 	case ISST_IF_PERF_SET_LEVEL:
1242 		ret = isst_if_set_perf_level(argp);
1243 		break;
1244 	case ISST_IF_PERF_SET_FEATURE:
1245 		ret = isst_if_set_perf_feature(argp);
1246 		break;
1247 	case ISST_IF_GET_PERF_LEVEL_INFO:
1248 		ret = isst_if_get_perf_level_info(argp);
1249 		break;
1250 	case ISST_IF_GET_PERF_LEVEL_CPU_MASK:
1251 		ret = isst_if_get_perf_level_mask(argp);
1252 		break;
1253 	case ISST_IF_GET_BASE_FREQ_INFO:
1254 		ret = isst_if_get_base_freq_info(argp);
1255 		break;
1256 	case ISST_IF_GET_BASE_FREQ_CPU_MASK:
1257 		ret = isst_if_get_base_freq_mask(argp);
1258 		break;
1259 	case ISST_IF_GET_TURBO_FREQ_INFO:
1260 		ret = isst_if_get_turbo_freq_info(argp);
1261 		break;
1262 	default:
1263 		break;
1264 	}
1265 	mutex_unlock(&isst_tpmi_dev_lock);
1266 
1267 	return ret;
1268 }
1269 
1270 #define TPMI_SST_AUTO_SUSPEND_DELAY_MS	2000
1271 
1272 int tpmi_sst_dev_add(struct auxiliary_device *auxdev)
1273 {
1274 	bool read_blocked = false, write_blocked = false;
1275 	struct intel_tpmi_plat_info *plat_info;
1276 	struct tpmi_sst_struct *tpmi_sst;
1277 	int i, ret, pkg = 0, inst = 0;
1278 	int num_resources;
1279 
1280 	ret = tpmi_get_feature_status(auxdev, TPMI_ID_SST, &read_blocked, &write_blocked);
1281 	if (ret)
1282 		dev_info(&auxdev->dev, "Can't read feature status: ignoring read/write blocked status\n");
1283 
1284 	if (read_blocked) {
1285 		dev_info(&auxdev->dev, "Firmware has blocked reads, exiting\n");
1286 		return -ENODEV;
1287 	}
1288 
1289 	plat_info = tpmi_get_platform_data(auxdev);
1290 	if (!plat_info) {
1291 		dev_err(&auxdev->dev, "No platform info\n");
1292 		return -EINVAL;
1293 	}
1294 
1295 	pkg = plat_info->package_id;
1296 	if (pkg >= topology_max_packages()) {
1297 		dev_err(&auxdev->dev, "Invalid package id: %x\n", pkg);
1298 		return -EINVAL;
1299 	}
1300 
1301 	if (isst_common.sst_inst[pkg])
1302 		return -EEXIST;
1303 
1304 	num_resources = tpmi_get_resource_count(auxdev);
1305 
1306 	if (!num_resources)
1307 		return -EINVAL;
1308 
1309 	tpmi_sst = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_sst), GFP_KERNEL);
1310 	if (!tpmi_sst)
1311 		return -ENOMEM;
1312 
1313 	tpmi_sst->power_domain_info = devm_kcalloc(&auxdev->dev, num_resources,
1314 						   sizeof(*tpmi_sst->power_domain_info),
1315 						   GFP_KERNEL);
1316 	if (!tpmi_sst->power_domain_info)
1317 		return -ENOMEM;
1318 
1319 	tpmi_sst->number_of_power_domains = num_resources;
1320 
1321 	for (i = 0; i < num_resources; ++i) {
1322 		struct resource *res;
1323 
1324 		res = tpmi_get_resource_at_index(auxdev, i);
1325 		if (!res) {
1326 			tpmi_sst->power_domain_info[i].sst_base = NULL;
1327 			continue;
1328 		}
1329 
1330 		tpmi_sst->power_domain_info[i].package_id = pkg;
1331 		tpmi_sst->power_domain_info[i].power_domain_id = i;
1332 		tpmi_sst->power_domain_info[i].auxdev = auxdev;
1333 		tpmi_sst->power_domain_info[i].write_blocked = write_blocked;
1334 		tpmi_sst->power_domain_info[i].sst_base = devm_ioremap_resource(&auxdev->dev, res);
1335 		if (IS_ERR(tpmi_sst->power_domain_info[i].sst_base))
1336 			return PTR_ERR(tpmi_sst->power_domain_info[i].sst_base);
1337 
1338 		ret = sst_main(auxdev, &tpmi_sst->power_domain_info[i]);
1339 		if (ret) {
1340 			devm_iounmap(&auxdev->dev, tpmi_sst->power_domain_info[i].sst_base);
1341 			tpmi_sst->power_domain_info[i].sst_base =  NULL;
1342 			continue;
1343 		}
1344 
1345 		++inst;
1346 	}
1347 
1348 	if (!inst)
1349 		return -ENODEV;
1350 
1351 	tpmi_sst->package_id = pkg;
1352 	auxiliary_set_drvdata(auxdev, tpmi_sst);
1353 
1354 	mutex_lock(&isst_tpmi_dev_lock);
1355 	if (isst_common.max_index < pkg)
1356 		isst_common.max_index = pkg;
1357 	isst_common.sst_inst[pkg] = tpmi_sst;
1358 	mutex_unlock(&isst_tpmi_dev_lock);
1359 
1360 	return 0;
1361 }
1362 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_add, INTEL_TPMI_SST);
1363 
1364 void tpmi_sst_dev_remove(struct auxiliary_device *auxdev)
1365 {
1366 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1367 
1368 	mutex_lock(&isst_tpmi_dev_lock);
1369 	isst_common.sst_inst[tpmi_sst->package_id] = NULL;
1370 	mutex_unlock(&isst_tpmi_dev_lock);
1371 }
1372 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_remove, INTEL_TPMI_SST);
1373 
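/*
 * Editor's note: tpmi_sst->power_domain_info below points to the first power
 * domain (index 0), so only that domain's SST-CP control, CLOS
 * configuration/association and SST-PP control registers are saved and
 * restored across suspend/resume.
 */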
1374 void tpmi_sst_dev_suspend(struct auxiliary_device *auxdev)
1375 {
1376 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1377 	struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
1378 	void __iomem *cp_base;
1379 
1380 	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
1381 	power_domain_info->saved_sst_cp_control = readq(cp_base + SST_CP_CONTROL_OFFSET);
1382 
1383 	memcpy_fromio(power_domain_info->saved_clos_configs, cp_base + SST_CLOS_CONFIG_0_OFFSET,
1384 		      sizeof(power_domain_info->saved_clos_configs));
1385 
1386 	memcpy_fromio(power_domain_info->saved_clos_assocs, cp_base + SST_CLOS_ASSOC_0_OFFSET,
1387 		      sizeof(power_domain_info->saved_clos_assocs));
1388 
1389 	power_domain_info->saved_pp_control = readq(power_domain_info->sst_base +
1390 						    power_domain_info->sst_header.pp_offset +
1391 						    SST_PP_CONTROL_OFFSET);
1392 }
1393 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_suspend, INTEL_TPMI_SST);
1394 
1395 void tpmi_sst_dev_resume(struct auxiliary_device *auxdev)
1396 {
1397 	struct tpmi_sst_struct *tpmi_sst = auxiliary_get_drvdata(auxdev);
1398 	struct tpmi_per_power_domain_info *power_domain_info = tpmi_sst->power_domain_info;
1399 	void __iomem *cp_base;
1400 
1401 	cp_base = power_domain_info->sst_base + power_domain_info->sst_header.cp_offset;
1402 	writeq(power_domain_info->saved_sst_cp_control, cp_base + SST_CP_CONTROL_OFFSET);
1403 
1404 	memcpy_toio(cp_base + SST_CLOS_CONFIG_0_OFFSET, power_domain_info->saved_clos_configs,
1405 		    sizeof(power_domain_info->saved_clos_configs));
1406 
1407 	memcpy_toio(cp_base + SST_CLOS_ASSOC_0_OFFSET, power_domain_info->saved_clos_assocs,
1408 		    sizeof(power_domain_info->saved_clos_assocs));
1409 
1410 	writeq(power_domain_info->saved_pp_control, power_domain_info->sst_base +
1411 				power_domain_info->sst_header.pp_offset + SST_PP_CONTROL_OFFSET);
1412 }
1413 EXPORT_SYMBOL_NS_GPL(tpmi_sst_dev_resume, INTEL_TPMI_SST);
1414 
1415 #define ISST_TPMI_API_VERSION	0x02
1416 
1417 int tpmi_sst_init(void)
1418 {
1419 	struct isst_if_cmd_cb cb;
1420 	int ret = 0;
1421 
1422 	mutex_lock(&isst_tpmi_dev_lock);
1423 
1424 	if (isst_core_usage_count) {
1425 		++isst_core_usage_count;
1426 		goto init_done;
1427 	}
1428 
1429 	isst_common.sst_inst = kcalloc(topology_max_packages(),
1430 				       sizeof(*isst_common.sst_inst),
1431 				       GFP_KERNEL);
1432 	if (!isst_common.sst_inst) {
1433 		ret = -ENOMEM;
1434 		goto init_done;
1435 	}
1436 
1437 	memset(&cb, 0, sizeof(cb));
1438 	cb.cmd_size = sizeof(struct isst_if_io_reg);
1439 	cb.offset = offsetof(struct isst_if_io_regs, io_reg);
1440 	cb.cmd_callback = NULL;
1441 	cb.api_version = ISST_TPMI_API_VERSION;
1442 	cb.def_ioctl = isst_if_def_ioctl;
1443 	cb.owner = THIS_MODULE;
1444 	ret = isst_if_cdev_register(ISST_IF_DEV_TPMI, &cb);
1445 	if (ret)
1446 		kfree(isst_common.sst_inst);
1447 	else
1448 		++isst_core_usage_count;
1449 init_done:
1450 	mutex_unlock(&isst_tpmi_dev_lock);
1451 	return ret;
1452 }
1453 EXPORT_SYMBOL_NS_GPL(tpmi_sst_init, INTEL_TPMI_SST);
1454 
1455 void tpmi_sst_exit(void)
1456 {
1457 	mutex_lock(&isst_tpmi_dev_lock);
1458 	if (isst_core_usage_count)
1459 		--isst_core_usage_count;
1460 
1461 	if (!isst_core_usage_count) {
1462 		isst_if_cdev_unregister(ISST_IF_DEV_TPMI);
1463 		kfree(isst_common.sst_inst);
1464 	}
1465 	mutex_unlock(&isst_tpmi_dev_lock);
1466 }
1467 EXPORT_SYMBOL_NS_GPL(tpmi_sst_exit, INTEL_TPMI_SST);
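
/*
 * Editor's note: a sketch of how the exported API above is expected to be
 * driven by the thin isst_tpmi auxiliary driver (the probe callback name and
 * its exact signature are assumptions, not part of this file):
 *
 *	static int intel_sst_probe(struct auxiliary_device *auxdev, ...)
 *	{
 *		int ret = tpmi_sst_init();
 *
 *		if (ret)
 *			return ret;
 *		ret = tpmi_sst_dev_add(auxdev);
 *		if (ret)
 *			tpmi_sst_exit();
 *		return ret;
 *	}
 *
 * with tpmi_sst_dev_remove() plus tpmi_sst_exit() on remove, and
 * tpmi_sst_dev_suspend()/tpmi_sst_dev_resume() wired to the device PM
 * callbacks.
 */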
1468 
1469 MODULE_IMPORT_NS(INTEL_TPMI);
1470 MODULE_IMPORT_NS(INTEL_TPMI_POWER_DOMAIN);
1471 
1472 MODULE_LICENSE("GPL");
1473