// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor thermal device for newer processors
 * Copyright (c) 2020, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/thermal.h>

#include "int340x_thermal_zone.h"
#include "processor_thermal_device.h"

#define DRV_NAME "proc_thermal_pci"

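/*
 * struct proc_thermal_pci - driver instance data
 * @pdev:	PCI device
 * @proc_priv:	processor thermal device private data
 * @tzone:	thermal zone for the package temperature
 * @work:	delayed work used to notify the thermal core and re-enable
 *		the threshold interrupt
 * @stored_thres: last trip temperature set from user space, in millidegree
 *		  Celsius, restored on resume
 * @no_legacy:	set when legacy registration via proc_thermal_add() failed
 */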
struct proc_thermal_pci {
	struct pci_dev *pdev;
	struct proc_thermal_device *proc_priv;
	struct thermal_zone_device *tzone;
	struct delayed_work work;
	int stored_thres;
	int no_legacy;
};

enum proc_thermal_mmio_type {
	PROC_THERMAL_MMIO_TJMAX,
	PROC_THERMAL_MMIO_PP0_TEMP,
	PROC_THERMAL_MMIO_PP1_TEMP,
	PROC_THERMAL_MMIO_PKG_TEMP,
	PROC_THERMAL_MMIO_THRES_0,
	PROC_THERMAL_MMIO_THRES_1,
	PROC_THERMAL_MMIO_INT_ENABLE_0,
	PROC_THERMAL_MMIO_INT_ENABLE_1,
	PROC_THERMAL_MMIO_INT_STATUS_0,
	PROC_THERMAL_MMIO_INT_STATUS_1,
	PROC_THERMAL_MMIO_MAX
};

struct proc_thermal_mmio_info {
	enum proc_thermal_mmio_type mmio_type;
	u64	mmio_addr;
	u64	shift;
	u64	mask;
};

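/* MMIO offset, bit shift and mask per field, indexed by proc_thermal_mmio_type */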
static struct proc_thermal_mmio_info proc_thermal_mmio_info[] = {
	{ PROC_THERMAL_MMIO_TJMAX, 0x599c, 16, 0xff },
	{ PROC_THERMAL_MMIO_PP0_TEMP, 0x597c, 0, 0xff },
	{ PROC_THERMAL_MMIO_PP1_TEMP, 0x5980, 0, 0xff },
	{ PROC_THERMAL_MMIO_PKG_TEMP, 0x5978, 0, 0xff },
	{ PROC_THERMAL_MMIO_THRES_0, 0x5820, 8, 0x7F },
	{ PROC_THERMAL_MMIO_THRES_1, 0x5820, 16, 0x7F },
	{ PROC_THERMAL_MMIO_INT_ENABLE_0, 0x5820, 15, 0x01 },
	{ PROC_THERMAL_MMIO_INT_ENABLE_1, 0x5820, 23, 0x01 },
	{ PROC_THERMAL_MMIO_INT_STATUS_0, 0x7200, 6, 0x01 },
	{ PROC_THERMAL_MMIO_INT_STATUS_1, 0x7200, 8, 0x01 },
};

#define B0D4_THERMAL_NOTIFY_DELAY	1000
static int notify_delay_ms = B0D4_THERMAL_NOTIFY_DELAY;

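/* Read a register field: load the 32-bit register, then shift and mask */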
static void proc_thermal_mmio_read(struct proc_thermal_pci *pci_info,
				    enum proc_thermal_mmio_type type,
				    u32 *value)
{
	*value = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base +
				proc_thermal_mmio_info[type].mmio_addr));
	*value >>= proc_thermal_mmio_info[type].shift;
	*value &= proc_thermal_mmio_info[type].mask;
}

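/* Update a register field with a read-modify-write, preserving the other bits */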
static void proc_thermal_mmio_write(struct proc_thermal_pci *pci_info,
				     enum proc_thermal_mmio_type type,
				     u32 value)
{
	u32 current_val;
	u32 mask;

	current_val = ioread32(((u8 __iomem *)pci_info->proc_priv->mmio_base +
				proc_thermal_mmio_info[type].mmio_addr));
	mask = proc_thermal_mmio_info[type].mask << proc_thermal_mmio_info[type].shift;
	current_val &= ~mask;

	value &= proc_thermal_mmio_info[type].mask;
	value <<= proc_thermal_mmio_info[type].shift;

	current_val |= value;
	iowrite32(current_val, ((u8 __iomem *)pci_info->proc_priv->mmio_base +
				proc_thermal_mmio_info[type].mmio_addr));
}

/*
 * To avoid sending too many notifications to user space, there is a one
 * second delay: the interrupt is disabled in the interrupt handler and
 * re-enabled from this work function, which itself runs one second after
 * the interrupt.
 */
static void proc_thermal_threshold_work_fn(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct proc_thermal_pci *pci_info = container_of(delayed_work,
						struct proc_thermal_pci, work);
	struct thermal_zone_device *tzone = pci_info->tzone;

	if (tzone)
		thermal_zone_device_update(tzone, THERMAL_TRIP_VIOLATED);

	/* Enable interrupt flag */
	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
}

static void pkg_thermal_schedule_work(struct delayed_work *work)
{
	unsigned long ms = msecs_to_jiffies(notify_delay_ms);

	schedule_delayed_work(work, ms);
}

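/*
 * Interrupt handler: read the interrupt status, mask the threshold interrupt,
 * write PCI config offset 0xdc, then schedule the delayed work that notifies
 * the thermal core and re-enables the interrupt.
 */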
static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
{
	struct proc_thermal_pci *pci_info = devid;
	u32 status;

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_INT_STATUS_0, &status);

	/* Clear the interrupt enable flag */
	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
	pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);

	pkg_thermal_schedule_work(&pci_info->work);

	return IRQ_HANDLED;
}

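/* Report the current package temperature in millidegree Celsius */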
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
	struct proc_thermal_pci *pci_info = tzd->devdata;
	u32 _temp;

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_PKG_TEMP, &_temp);
	*temp = (unsigned long)_temp * 1000;

	return 0;
}

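/*
 * Program the passive trip point. A temperature of zero or below cancels
 * pending work, clears the threshold, disables the interrupt and disables the
 * zone. Otherwise the threshold is written as an offset below TjMax and the
 * interrupt is enabled.
 */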
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
	struct proc_thermal_pci *pci_info = tzd->devdata;
	int tjmax, _temp;

	if (temp <= 0) {
		cancel_delayed_work_sync(&pci_info->work);
		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
		thermal_zone_device_disable(tzd);
		pci_info->stored_thres = 0;
		return 0;
	}

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
	_temp = tjmax - (temp / 1000);
	if (_temp < 0)
		return -EINVAL;

	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, _temp);
	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);

	pci_info->stored_thres = temp;

	return 0;
}

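/* Convert the programmed threshold (offset below TjMax) back to millidegree Celsius */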
static int get_trip_temp(struct proc_thermal_pci *pci_info)
{
	int temp, tjmax;

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_THRES_0, &temp);
	if (!temp)
		return THERMAL_TEMP_INVALID;

	proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
	temp = (tjmax - temp) * 1000;

	return temp;
}

static struct thermal_trip psv_trip = {
	.type = THERMAL_TRIP_PASSIVE,
};

static struct thermal_zone_device_ops tzone_ops = {
	.get_temp = sys_get_curr_temp,
	.set_trip_temp	= sys_set_trip_temp,
};

static struct thermal_zone_params tzone_params = {
	.governor_name = "user_space",
	.no_hwmon = true,
};

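/*
 * Probe: enable the PCI device, register the legacy interface when available,
 * add the MMIO based features, register the "TCPU_PCI" thermal zone with a
 * single passive trip, then allocate an interrupt vector and request the
 * threshold interrupt before enabling the zone.
 */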
static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct proc_thermal_device *proc_priv;
	struct proc_thermal_pci *pci_info;
	int irq_flag = 0, irq, ret;

	proc_priv = devm_kzalloc(&pdev->dev, sizeof(*proc_priv), GFP_KERNEL);
	if (!proc_priv)
		return -ENOMEM;

	pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;

	pci_info->pdev = pdev;
	ret = pcim_enable_device(pdev);
	if (ret < 0) {
		dev_err(&pdev->dev, "error: could not enable device\n");
		return ret;
	}

	pci_set_master(pdev);

	INIT_DELAYED_WORK(&pci_info->work, proc_thermal_threshold_work_fn);

	ret = proc_thermal_add(&pdev->dev, proc_priv);
	if (ret) {
		dev_err(&pdev->dev, "error: proc_thermal_add, will continue\n");
		pci_info->no_legacy = 1;
	}

	proc_priv->priv_data = pci_info;
	pci_info->proc_priv = proc_priv;
	pci_set_drvdata(pdev, proc_priv);

	ret = proc_thermal_mmio_add(pdev, proc_priv, id->driver_data);
	if (ret)
		goto err_ret_thermal;

	psv_trip.temperature = get_trip_temp(pci_info);

	pci_info->tzone = thermal_zone_device_register_with_trips("TCPU_PCI", &psv_trip,
							1, 1, pci_info,
							&tzone_ops,
							&tzone_params, 0, 0);
	if (IS_ERR(pci_info->tzone)) {
		ret = PTR_ERR(pci_info->tzone);
		goto err_ret_mmio;
	}

	/* request and enable interrupt */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to allocate vectors!\n");
		goto err_ret_tzone;
	}
	if (!pdev->msi_enabled && !pdev->msix_enabled)
		irq_flag = IRQF_SHARED;

	irq = pci_irq_vector(pdev, 0);
	ret = devm_request_threaded_irq(&pdev->dev, irq,
					proc_thermal_irq_handler, NULL,
					irq_flag, KBUILD_MODNAME, pci_info);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ %d failed\n", pdev->irq);
		goto err_free_vectors;
	}

	ret = thermal_zone_device_enable(pci_info->tzone);
	if (ret)
		goto err_free_vectors;

	return 0;

err_free_vectors:
	pci_free_irq_vectors(pdev);
err_ret_tzone:
	thermal_zone_device_unregister(pci_info->tzone);
err_ret_mmio:
	proc_thermal_mmio_remove(pdev, proc_priv);
err_ret_thermal:
	if (!pci_info->no_legacy)
		proc_thermal_remove(proc_priv);
	pci_disable_device(pdev);

	return ret;
}

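/*
 * Remove: cancel pending work, clear the threshold and interrupt enable
 * flags, release the interrupt and vectors, then tear down the thermal zone,
 * the MMIO features and, when used, the legacy interface.
 */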
static void proc_thermal_pci_remove(struct pci_dev *pdev)
{
	struct proc_thermal_device *proc_priv = pci_get_drvdata(pdev);
	struct proc_thermal_pci *pci_info = proc_priv->priv_data;

	cancel_delayed_work_sync(&pci_info->work);

	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0, 0);
	proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);

	devm_free_irq(&pdev->dev, pdev->irq, pci_info);
	pci_free_irq_vectors(pdev);

	thermal_zone_device_unregister(pci_info->tzone);
	proc_thermal_mmio_remove(pdev, pci_info->proc_priv);
	if (!pci_info->no_legacy)
		proc_thermal_remove(proc_priv);
	pci_disable_device(pdev);
}

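/*
 * PM callbacks: suspend defers to the legacy interface when it is in use.
 * Resume writes the stored trip temperature (converted from millidegree to
 * degree Celsius) back to the threshold register and re-enables the threshold
 * interrupt before calling the legacy resume handler, if used.
 */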
#ifdef CONFIG_PM_SLEEP
static int proc_thermal_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct proc_thermal_device *proc_priv;
	struct proc_thermal_pci *pci_info;

	proc_priv = pci_get_drvdata(pdev);
	pci_info = proc_priv->priv_data;

	if (!pci_info->no_legacy)
		return proc_thermal_suspend(dev);

	return 0;
}

static int proc_thermal_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct proc_thermal_device *proc_priv;
	struct proc_thermal_pci *pci_info;

	proc_priv = pci_get_drvdata(pdev);
	pci_info = proc_priv->priv_data;

	if (pci_info->stored_thres) {
		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_THRES_0,
					 pci_info->stored_thres / 1000);
		proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 1);
	}

	if (!pci_info->no_legacy)
		return proc_thermal_resume(dev);

	return 0;
}
#else
#define proc_thermal_pci_suspend NULL
#define proc_thermal_pci_resume NULL
#endif

static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
			 proc_thermal_pci_resume);

static const struct pci_device_id proc_thermal_pci_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
	{ PCI_DEVICE_DATA(INTEL, MTLP_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
	{ PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
	{ },
};

MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);

static struct pci_driver proc_thermal_pci_driver = {
	.name		= DRV_NAME,
	.probe		= proc_thermal_pci_probe,
	.remove		= proc_thermal_pci_remove,
	.id_table	= proc_thermal_pci_ids,
	.driver.pm	= &proc_thermal_pci_pm,
};

module_pci_driver(proc_thermal_pci_driver);

MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
MODULE_LICENSE("GPL v2");