#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>

#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>

devclass_t drm_devclass;

MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_MINOR, "drm_minor", "DRM MINOR Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
    "DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");

const char *fb_mode_option = NULL;

#define NSEC_PER_USEC	1000L
#define NSEC_PER_SEC	1000000000L

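/*
 * Convert a struct timeval to a count of nanoseconds.
 */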
int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}

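/*
 * Convert a count of nanoseconds to a struct timeval.  Negative values
 * are normalized so that tv_usec is always non-negative.
 */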
struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	long rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}

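/*
 * Look up a vendor/device pair in a driver's PCI ID list.  A device ID
 * of 0 in the list acts as a wildcard.  Returns NULL if no entry matches.
 */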
static drm_pci_id_list_t *
drm_find_description(int vendor, int device, drm_pci_id_list_t *idlist)
{
	int i;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return (&idlist[i]);
		}
	}
	return (NULL);
}

/*
 * drm_probe_helper: called by a driver at the end of its probe
 * method.
 */
int
drm_probe_helper(device_t kdev, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY ||
	    (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA &&
	     pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER))
		return (-ENXIO);

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (device_get_desc(kdev) == NULL) {
			DRM_DEBUG("%s desc: %s\n",
			    device_get_nameunit(kdev), id_entry->name);
			device_set_desc(kdev, id_entry->name);
		}
		return (0);
	}

	return (-ENXIO);
}
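
/*
 * Illustrative sketch only (hypothetical names, not part of this
 * file): a minimal driver probe method would normally just forward to
 * drm_probe_helper() with its PCI ID table, e.g.
 *
 *	static int
 *	foo_drm_probe(device_t kdev)
 *	{
 *		return (drm_probe_helper(kdev, foo_pci_idlist));
 *	}
 *
 * A matching attach method would call drm_attach_helper() (below) with
 * the same ID table and the driver's struct drm_driver.
 */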

/*
 * drm_attach_helper: called by a driver at the end of its attach
 * method.
 */
int
drm_attach_helper(device_t kdev, drm_pci_id_list_t *idlist,
    struct drm_driver *driver)
{
	struct drm_device *dev;
	int vendor, device;
	int ret;

	dev = device_get_softc(kdev);

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);
	dev->id_entry = drm_find_description(vendor, device, idlist);

	ret = drm_get_pci_dev(kdev, dev, driver);

	return (ret);
}

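/*
 * drm_generic_detach: generic detach method.  Tears down the DRM
 * device, releases any cached PCI memory resources and disables bus
 * mastering.
 */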
int
drm_generic_detach(device_t kdev)
{
	struct drm_device *dev;
	int i;

	dev = device_get_softc(kdev);

	drm_put_dev(dev);

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->dev, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (pci_disable_busmaster(dev->dev))
		DRM_ERROR("Request to disable bus-master failed.\n");

	return (0);
}

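/*
 * Export the device's PCI bus ID string and whether the driver supports
 * kernel modesetting as read-only sysctl nodes under the given tree.
 * Returns -ENOMEM if a node cannot be created.
 */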
int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid;

	snprintf(dev->busid_str, sizeof(dev->busid_str),
	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	     dev->pci_slot, dev->pci_func);
	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);
	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);

	return (0);
}

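/*
 * Return non-zero if the given PCI capability is present on the device.
 */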
static int
drm_device_find_capability(struct drm_device *dev, int cap)
{

	return (pci_find_cap(dev->dev, cap, NULL) == 0);
}

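/*
 * Determine whether the device is an AGP device, preferring the
 * driver's device_is_agp hook and falling back to a check for the AGP
 * PCI capability.
 */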
int
drm_pci_device_is_agp(struct drm_device *dev)
{
	if (dev->driver->device_is_agp != NULL) {
		int ret;

		/* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
		 * AGP, 2 = fall back to PCI capability
		 */
		ret = (*dev->driver->device_is_agp)(dev);
		if (ret != DRM_MIGHT_BE_AGP)
			return (ret);
	}

	return (drm_device_find_capability(dev, PCIY_AGP));
}

int
drm_pci_device_is_pcie(struct drm_device *dev)
{

	return (drm_device_find_capability(dev, PCIY_EXPRESS));
}

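/*
 * Check a single dmi_system_id entry against the smbios.planar.maker
 * and smbios.planar.product kenv values; every listed match must
 * succeed for the entry to be considered found.
 */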
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	char *hw_vendor, *hw_prod;
	int i, slot;
	bool res;

	hw_vendor = kern_getenv("smbios.planar.maker");
	hw_prod = kern_getenv("smbios.planar.product");
	res = true;
	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
		case DMI_BOARD_VENDOR:
			if (hw_vendor != NULL &&
			    !strcmp(hw_vendor, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		case DMI_PRODUCT_NAME:
		case DMI_BOARD_NAME:
			if (hw_prod != NULL &&
			    !strcmp(hw_prod, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		default:
			res = false;
			goto out;
		}
	}
out:
	freeenv(hw_vendor);
	freeenv(hw_prod);

	return (res);
}

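/*
 * Walk a table of dmi_system_id entries and return true if any entry
 * matches the running system.  An entry's callback, if present, may
 * stop the scan early by returning non-zero.
 */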
bool
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	bool res;

	for (res = false, dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			res = true;
			if (dsi->callback != NULL && dsi->callback(dsi))
				break;
		}
	}
	return (res);
}

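/*
 * Add (or update) a memory range attribute covering the given range,
 * in the style of the Linux MTRR helpers.  Returns 0 on success or a
 * negated errno.
 */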
int
drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags)
{
	int act;
	struct mem_range_desc mrdesc;

	mrdesc.mr_base = offset;
	mrdesc.mr_len = size;
	mrdesc.mr_flags = flags;
	act = MEMRANGE_SET_UPDATE;
	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
	return (-mem_range_attr_set(&mrdesc, &act));
}

int
drm_mtrr_del(int handle __unused, unsigned long offset, unsigned long size,
    unsigned int flags)
{
	int act;
	struct mem_range_desc mrdesc;

	mrdesc.mr_base = offset;
	mrdesc.mr_len = size;
	mrdesc.mr_flags = flags;
	act = MEMRANGE_SET_REMOVE;
	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
	return (-mem_range_attr_set(&mrdesc, &act));
}

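/*
 * Flush CPU caches for the given pages.  Only implemented on x86;
 * other architectures just log an error.
 */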
void
drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
{

#if defined(__i386__) || defined(__amd64__)
	pmap_invalidate_cache_pages(pages, num_pages);
#else
	DRM_ERROR("drm_clflush_pages not implemented on this architecture");
#endif
}

void
drm_clflush_virt_range(char *addr, unsigned long length)
{

#if defined(__i386__) || defined(__amd64__)
	pmap_invalidate_cache_range((vm_offset_t)addr,
	    (vm_offset_t)addr + length, TRUE);
#else
	DRM_ERROR("drm_clflush_virt_range not implemented on this architecture");
#endif
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

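/*
 * Linux compatibility ioctl entry point: translate the Linux direction
 * bits in the command to their native values, then hand the request to
 * the native ioctl handler.
 */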
static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args *args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return (error);
}
#endif /* DRM_LINUX */

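/*
 * Module event handler: fetch the drm.debug and drm.notyet tunables
 * when the module is loaded.
 */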
static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet);
		break;
	}
	return (0);
}

static moduledata_t drm_mod = {
	"drmn",
	drm_modevent,
	0
};

DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);