// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hw-txe.h"

static const struct pci_device_id mei_txe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */

	{0, }
};
MODULE_DEVICE_TABLE(pci, mei_txe_pci_tbl);

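/*
 * The power gating pm domain helpers below are only needed when runtime
 * PM is enabled; without CONFIG_PM they compile away to empty stubs.
 */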
#ifdef CONFIG_PM
static inline void mei_txe_set_pm_domain(struct mei_device *dev);
static inline void mei_txe_unset_pm_domain(struct mei_device *dev);
#else
static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

/**
 * mei_txe_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_txe_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_txe_hw *hw;
	const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
	int err;

	/* enable pci device */
	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* enable PCI bus mastering */
	pci_set_master(pdev);
	/* request PCI regions and map the device's I/O memory for the mei driver */
	err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto end;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available.\n");
			goto end;
		}
	}

	/* allocate and initialize the mei dev structure */
	dev = mei_txe_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto end;
	}
	hw = to_txe_hw(dev);
	hw->mem_addr = pcim_iomap_table(pdev);

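	/*
	 * MSI is best effort here: if it cannot be enabled, the driver
	 * falls back to a shared legacy interrupt below.
	 */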
	pci_enable_msi(pdev);

	/* clear spurious interrupts */
	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto end;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto stop;

	pci_set_drvdata(pdev, dev);

	/*
	 * MEI needs to be resumed from runtime suspend mode
	 * in order to perform the link reset flow upon system suspend.
	 */
	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);

	/*
	 * TXE maps runtime suspend/resume to its own power gating states,
	 * hence we need to go around the native PCI runtime service which
	 * would eventually bring the device into D3cold/hot state.
	 * Unlike from its own power gating, the TXE device cannot wake up
	 * from D3, so to get around the PCI device's native runtime pm,
	 * TXE uses runtime pm domain handlers, which take precedence.
	 */
	mei_txe_set_pm_domain(dev);

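	/* release the runtime PM usage reference without requesting idle */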
	pm_runtime_put_noidle(&pdev->dev);

	return 0;

stop:
	mei_stop(dev);
release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_txe_shutdown - Device Shutdown Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_shutdown is called from the reboot notifier;
 * it is a simplified version of remove so we go down faster.
 */
static void mei_txe_shutdown(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	dev_dbg(&pdev->dev, "shutdown\n");
	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
}

/**
 * mei_txe_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_txe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_txe_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		dev_err(&pdev->dev, "mei: dev == NULL\n");
		return;
	}

	pm_runtime_get_noresume(&pdev->dev);

	mei_stop(dev);

	mei_txe_unset_pm_domain(dev);

	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);

	mei_deregister(dev);
}

#ifdef CONFIG_PM_SLEEP
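/**
 * mei_txe_pci_suspend - PCI system sleep suspend callback
 *
 * @device: device structure
 *
 * Stop the device and release its interrupt and MSI before
 * the system enters a sleep state.
 *
 * Return: 0 on success, -ENODEV if the driver data is missing.
 */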
static int mei_txe_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);

	if (!dev)
		return -ENODEV;

	dev_dbg(&pdev->dev, "suspend\n");

	mei_stop(dev);

	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return 0;
}

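/**
 * mei_txe_pci_resume - PCI system sleep resume callback
 *
 * @device: device structure
 *
 * Re-enable MSI, re-request the interrupt and restart the device
 * after the system wakes from a sleep state.
 *
 * Return: 0 on success, a negative error code otherwise.
 */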
static int mei_txe_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	mei_clear_interrupts(dev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_txe_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_txe_irq_quick_handler,
			mei_txe_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);
	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
				pdev->irq);
		return err;
	}

	err = mei_restart(dev);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM
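/**
 * mei_txe_pm_runtime_idle - runtime PM idle callback
 *
 * @device: device structure
 *
 * Request autosuspend when the device has no pending writes.
 *
 * Return: -ENODEV if the driver data is missing, -EBUSY otherwise.
 */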
static int mei_txe_pm_runtime_idle(struct device *device)
{
	struct mei_device *dev;

	dev_dbg(device, "rpm: txe: runtime_idle\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;
	if (mei_write_is_idle(dev))
		pm_runtime_autosuspend(device);

	return -EBUSY;
}
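
/**
 * mei_txe_pm_runtime_suspend - runtime PM suspend callback
 *
 * @device: device structure
 *
 * Drop the aliveness request when the device is write idle so it can
 * enter its own power gated state; the interrupt stays enabled as the
 * device remains in D0.
 *
 * Return: 0 on success, -EAGAIN if the device is busy,
 *         another negative error code on failure.
 */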
static int mei_txe_pm_runtime_suspend(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: txe: runtime suspend\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (mei_write_is_idle(dev))
		ret = mei_txe_aliveness_set_sync(dev, 0);
	else
		ret = -EAGAIN;

	/* keep the irq enabled, we are staying in D0 */

	dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret);

	mutex_unlock(&dev->device_lock);

	if (ret && ret != -EAGAIN)
		schedule_work(&dev->reset_work);

	return ret;
}

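/**
 * mei_txe_pm_runtime_resume - runtime PM resume callback
 *
 * @device: device structure
 *
 * Re-enable interrupts and request aliveness to bring the device out
 * of its power gated state; a reset is scheduled on failure.
 *
 * Return: 0 on success, a negative error code otherwise.
 */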
static int mei_txe_pm_runtime_resume(struct device *device)
{
	struct mei_device *dev;
	int ret;

	dev_dbg(device, "rpm: txe: runtime resume\n");

	dev = dev_get_drvdata(device);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	mei_enable_interrupts(dev);

	ret = mei_txe_aliveness_set_sync(dev, 1);

	mutex_unlock(&dev->device_lock);

	dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret);

	if (ret)
		schedule_work(&dev->reset_work);

	return ret;
}

/**
 * mei_txe_set_pm_domain - fill and set pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	if (pdev->dev.bus && pdev->dev.bus->pm) {
		dev->pg_domain.ops = *pdev->dev.bus->pm;

		dev->pg_domain.ops.runtime_suspend = mei_txe_pm_runtime_suspend;
		dev->pg_domain.ops.runtime_resume = mei_txe_pm_runtime_resume;
		dev->pg_domain.ops.runtime_idle = mei_txe_pm_runtime_idle;

		dev_pm_domain_set(&pdev->dev, &dev->pg_domain);
	}
}

/**
 * mei_txe_unset_pm_domain - clean pm domain structure for device
 *
 * @dev: mei_device
 */
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
	/* stop using pm callbacks if any */
	dev_pm_domain_set(dev->dev, NULL);
}

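/*
 * System sleep callbacks are compiled in only with CONFIG_PM_SLEEP;
 * the runtime callbacks set here are also installed through the pm
 * domain in mei_txe_set_pm_domain(), which takes precedence.
 */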
static const struct dev_pm_ops mei_txe_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mei_txe_pci_suspend,
				mei_txe_pci_resume)
	SET_RUNTIME_PM_OPS(
		mei_txe_pm_runtime_suspend,
		mei_txe_pm_runtime_resume,
		mei_txe_pm_runtime_idle)
};

#define MEI_TXE_PM_OPS	(&mei_txe_pm_ops)
#else
#define MEI_TXE_PM_OPS	NULL
#endif /* CONFIG_PM */

/*
 *  PCI driver structure
 */
static struct pci_driver mei_txe_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_txe_pci_tbl,
	.probe = mei_txe_probe,
	.remove = mei_txe_remove,
	.shutdown = mei_txe_shutdown,
	.driver.pm = MEI_TXE_PM_OPS,
};

module_pci_driver(mei_txe_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Trusted Execution Environment Interface");
MODULE_LICENSE("GPL v2");