/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rman.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static void lkpi_pcim_iomap_table_release(struct device *, void *);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
	bool enable_io;
	/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
	uint8_t region_mask;
	struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
struct pcim_iomap_devres {
	void *mmio_table[PCIR_MAX_BAR_0 + 1];
	struct resource *res_table[PCIR_MAX_BAR_0 + 1];
};

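/*
 * Per-device DMA state: the streaming and coherent busdma tags plus a
 * pctrie that maps a returned dma_addr_t back to its linux_dma_obj so
 * that unmap/sync can find the busdma map again.
 */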
struct linux_dma_priv {
	uint64_t dma_mask;
	bus_dma_tag_t dmat;
	uint64_t dma_coherent_mask;
	bus_dma_tag_t dmat_coherent;
	struct mtx lock;
	struct pctrie ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tag. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}

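/*
 * (Re-)create the streaming DMA tag for the given mask.  Any existing
 * tag is destroyed first, so this must not race with active mappings.
 */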
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
{
	struct pci_dev *pdev;

	KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));

	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (pdev->vendor == vendor && pdev->device == device)
			break;
	}
	spin_unlock(&pci_lock);

	return (pdev);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
	    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev));
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);

	if (pci_msi_count(dev) > 0)
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	kfree(pdev->path_name);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

static struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

static struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	dinfo = device_get_ivars(pdev->dev.bsddev);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	/* Reserve resources for this BAR if needed. */
	if (rle == NULL && reserve_bar)
		rle = linux_pci_reserve_bar(pdev, rl, type, rid);
	return (rle);
}

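/*
 * Common attach path for both native PCI and DRM children: fill in the
 * struct pci_dev from the newbus device, set up the default DMA tags,
 * link the device onto the global pci_devices list, and finally invoke
 * the Linux driver's probe() callback, unwinding on failure.
 */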
int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);
	spin_lock_init(&pdev->pcie_cap_lock);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

static struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
	if (!pdev->managed)
		return (NULL);

	return (lkpi_pci_devres_get_alloc(pdev));
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

int
linuxkpi_pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int error;

	/* Here we cannot run through the pdev->managed check. */
	dr = lkpi_pci_devres_get_alloc(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* If resources were enabled before do not do it again. */
	if (dr->enable_io)
		return (0);

	error = pci_enable_device(pdev);
	if (error == 0)
		dr->enable_io = true;

	/* This device is now managed. */
	pdev->managed = true;

	return (error);
}

static struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

void __iomem **
linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (NULL);

	/*
	 * If the driver has manually set a flag to be able to request the
	 * resource to use bus_read/write_<n>, return the shadow table.
	 */
	if (pdev->want_iomap_res)
		return ((void **)dr->res_table);

	/* This is the Linux default. */
	return (dr->mmio_table);
}

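/*
 * Resolve a BAR to an active resource, allocating and caching it in
 * pdev->mmio on first use so that repeated iomap/request calls for the
 * same BAR return the same resource.
 */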
static struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		    __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

void *
linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
{
	struct resource *res;

	res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
	if (res == NULL)
		return (NULL);
	/* This is a FreeBSD extension so we can use bus_*(). */
	if (pdev->want_iomap_res)
		return (res);
	return ((void *)rman_get_bushandle(res));
}

void
linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
{
	struct pci_mmio_region *mmio, *p;

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (res != (void *)rman_get_bushandle(mmio->res))
			continue;
		bus_release_resource(pdev->dev.bsddev,
		    mmio->type, mmio->rid, mmio->res);
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
		return;
	}
}

int
linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
{
	struct pcim_iomap_devres *dr;
	void *res;
	uint32_t mappings;
	int bar;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* Now iomap all the requested (by "mask") ones. */
	for (bar = mappings = 0; mappings != mask; bar++) {
		if ((mask & (1 << bar)) == 0)
			continue;

		/* Requesting the same BAR twice is not allowed. */
		if (dr->mmio_table[bar] != NULL) {
			device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
			    __func__, bar, dr->mmio_table[bar]);
			goto err;
		}

		res = _lkpi_pci_iomap(pdev, bar, 0);
		if (res == NULL)
			goto err;
		dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
		dr->res_table[bar] = res;

		mappings |= (1 << bar);
	}

	return (0);
err:
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((mappings & (1 << bar)) != 0) {
			res = dr->mmio_table[bar];
			if (res == NULL)
				continue;
			pci_iounmap(pdev, res);
		}
	}

	return (-EINVAL);
}

static void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
		if (error == 0 && pmops->suspend_noirq != NULL)
			error = -pmops->suspend_noirq(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

static struct resource_list_entry *
lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (NULL);
	bar = PCIR_BAR(bar);
	return (linux_pci_get_rle(pdev, type, bar, reserve));
}

struct device *
lkpi_pci_find_irq_dev(unsigned int irq)
{
	struct pci_dev *pdev;
	struct device *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (irq == pdev->dev.irq ||
		    (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
			found = &pdev->dev;
			break;
		}
	}
	spin_unlock(&pci_lock);
	return (found);
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI though they should be/are expected to be by Linux
	 * drivers.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}

int
linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int error;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		error = pci_request_region(pdev, i, res_name);
		if (error && error != -ENODEV) {
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

void
linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio, *p;

	if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
		return;

	/*
	 * As we implicitly track the requests we also need to clear them on
	 * release.  Do clear before resource release.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
		    " region_table res %p != rle->res %p\n", __func__, pdev,
		    bar, dr->region_table[bar], rle->res));
		dr->region_table[bar] = NULL;
		dr->region_mask &= ~(1 << bar);
	}

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (rle->res != (void *)rman_get_bushandle(mmio->res))
			continue;
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
	}

	bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}

void
linuxkpi_pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++)
		pci_release_region(pdev, i);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

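/*
 * Linux semantics: returns 0 on success, a negative errno on failure,
 * or, if fewer than nreq vectors are available, the (positive) number
 * of vectors that could be allocated instead.
 */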
int
linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
    int nreq)
{
	struct resource_list_entry *rle;
	int error;
	int avail;
	int i;

	avail = pci_msix_count(pdev->dev.bsddev);
	if (avail < nreq) {
		if (avail == 0)
			return (-EINVAL);
		return (avail);
	}
	avail = nreq;
	if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
		return (error);
	/*
	 * Handle the case where "pci_alloc_msix()" may allocate fewer
	 * interrupts than requested and return with no error:
	 */
	if (avail < nreq) {
		pci_release_msi(pdev->dev.bsddev);
		return (avail);
	}
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	for (i = 0; i < nreq; i++)
		entries[i].vector = pdev->dev.irq_start + i;
	pdev->msix_enabled = true;
	return (0);
}

int
_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
{
	struct resource_list_entry *rle;
	int error;
	int nvec;

	if (maxvec < minvec)
		return (-EINVAL);

	nvec = pci_msi_count(pdev->dev.bsddev);
	if (nvec < 1 || nvec < minvec)
		return (-ENOSPC);

	nvec = min(nvec, maxvec);
	if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
		return (error);

	/* Native PCI might only ever ask for 32 vectors. */
	if (nvec < minvec) {
		pci_release_msi(pdev->dev.bsddev);
		return (-ENOSPC);
	}

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + nvec;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

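/*
 * A minimal usage sketch (hypothetical driver code): try MSI-X first and
 * fall back to MSI, then to the legacy INTx line:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return (nvec);
 */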
int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_LEGACY) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = lkpi_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

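/*
 * One tracked streaming mapping: the busdma map behind a dma_addr_t,
 * keyed by that address in the owning device's (or pool's) pctrie.
 */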
struct linux_dma_obj {
	void *vaddr;
	uint64_t dma_addr;
	bus_dmamap_t dmamap;
	bus_dma_tag_t dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug)
			dump_stack();
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t size;
	dma_addr_t *handle;
	void *mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device *pool_device;
	uma_zone_t pool_zone;
	struct mtx pool_lock;
	bus_dma_tag_t pool_dmat;
	size_t pool_entry_size;
	struct pctrie pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

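/*
 * dma_pool is backed by a UMA cache zone: import/release allocate and
 * free the underlying bus_dmamem buffers, while the ctor/dtor load and
 * unload the busdma map each time an object moves between the cache
 * and a consumer.
 *
 * A minimal usage sketch of the Linux-facing API (hypothetical sizes):
 *
 *	pool = dma_pool_create("mydrv", &pdev->dev, 256, 16, 0);
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	dma_pool_free(pool, vaddr, busaddr);
 *	dma_pool_destroy(pool);
 */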
static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}