#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>

#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>

devclass_t drm_devclass;

MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_MINOR, "drm_minor", "DRM MINOR Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
    "DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");

const char *fb_mode_option = NULL;

#define	NSEC_PER_USEC	1000L
#define	NSEC_PER_SEC	1000000000L

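/*
 * Linux-compatibility time conversion helper. The cast to int64_t
 * below happens before the multiplication so the seconds value cannot
 * overflow a 32-bit intermediate on platforms with a 32-bit time_t.
 */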
int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
	    tv->tv_usec * NSEC_PER_USEC;
}

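/*
 * Convert nanoseconds back to a struct timeval. The C division below
 * truncates toward zero, so a negative remainder is folded back into
 * the previous second, giving flooring semantics: e.g. -1ns yields
 * tv_sec = -1, tv_usec = 999999.
 */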
struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	long rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / 1000;
	return (tv);
}

/* Copied from OFED. */
unsigned long drm_linux_timer_hz_mask;

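/*
 * Example: with the default hz = 1000, the loop below settles on the
 * next power of two (1024) and then subtracts one, leaving
 * drm_linux_timer_hz_mask = 1023 so tick values can be masked rather
 * than divided.
 */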
static void
drm_linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	drm_linux_timer_hz_mask = 1;
	while (drm_linux_timer_hz_mask < (unsigned long)hz)
		drm_linux_timer_hz_mask *= 2;
	drm_linux_timer_hz_mask--;
}
SYSINIT(drm_linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, drm_linux_timer_init,
    NULL);

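/*
 * Look up a PCI vendor/device pair in a driver-supplied ID table. The
 * table is terminated by an entry with vendor == 0, and an entry with
 * device == 0 acts as a wildcard matching every device of that vendor.
 */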
static const drm_pci_id_list_t *
drm_find_description(int vendor, int device, const drm_pci_id_list_t *idlist)
{
	int i;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return (&idlist[i]);
		}
	}
	return (NULL);
}

/*
 * drm_probe_helper: called by a driver at the end of its probe
 * method.
 */
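/*
 * A typical probe method simply forwards to this helper; a minimal
 * sketch, assuming a hypothetical driver "mydrm" with ID table
 * mydrm_pciidlist:
 *
 *	static int
 *	mydrm_probe(device_t kdev)
 *	{
 *		return (drm_probe_helper(kdev, mydrm_pciidlist));
 *	}
 */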
int
drm_probe_helper(device_t kdev, const drm_pci_id_list_t *idlist)
{
	const drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY ||
	    (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA &&
	    pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER))
		return (-ENXIO);

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (device_get_desc(kdev) == NULL) {
			DRM_DEBUG("%s desc: %s\n",
			    device_get_nameunit(kdev), id_entry->name);
			device_set_desc(kdev, id_entry->name);
		}
#if !defined(__arm__)
		DRM_OBSOLETE(kdev);
#endif
		return (-BUS_PROBE_GENERIC);
	}

	return (-ENXIO);
}

/*
 * drm_attach_helper: called by a driver at the end of its attach
 * method.
 */
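/*
 * A matching attach method, again a sketch with hypothetical names
 * (mydrm_driver being the driver's struct drm_driver):
 *
 *	static int
 *	mydrm_attach(device_t kdev)
 *	{
 *		return (drm_attach_helper(kdev, mydrm_pciidlist,
 *		    &mydrm_driver));
 *	}
 */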
int
drm_attach_helper(device_t kdev, const drm_pci_id_list_t *idlist,
    struct drm_driver *driver)
{
	struct drm_device *dev;
	int vendor, device;
	int ret;

	dev = device_get_softc(kdev);

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);
	dev->id_entry = drm_find_description(vendor, device, idlist);

	ret = drm_get_pci_dev(kdev, dev, driver);

	return (ret);
}

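/*
 * Generic suspend/resume entry points for drivers that follow the
 * Linux model: the driver hook returns a negative errno, which is
 * negated here into the positive error convention newbus expects,
 * and child devices are then suspended via bus_generic_suspend().
 */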
int
drm_generic_suspend(device_t kdev)
{
	struct drm_device *dev;
	int error;

	DRM_DEBUG_KMS("Starting suspend\n");

	dev = device_get_softc(kdev);
	if (dev->driver->suspend) {
		pm_message_t state;

		state.event = PM_EVENT_SUSPEND;
		error = -dev->driver->suspend(dev, state);
		if (error)
			goto out;
	}

	error = bus_generic_suspend(kdev);

out:
	DRM_DEBUG_KMS("Finished suspend: %d\n", error);

	return (error);
}

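/* The resume path mirrors drm_generic_suspend() above. */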
int
drm_generic_resume(device_t kdev)
{
	struct drm_device *dev;
	int error;

	DRM_DEBUG_KMS("Starting resume\n");

	dev = device_get_softc(kdev);
	if (dev->driver->resume) {
		error = -dev->driver->resume(dev);
		if (error)
			goto out;
	}

	error = bus_generic_resume(kdev);

out:
	DRM_DEBUG_KMS("Finished resume: %d\n", error);

	return (error);
}

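/*
 * Generic detach: tear down the DRM device state, release any PCI
 * memory resources still held, and disable bus mastering.
 */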
int
drm_generic_detach(device_t kdev)
{
	struct drm_device *dev;
	int i;

	dev = device_get_softc(kdev);

	drm_put_dev(dev);

	/* Clean up PCI resources allocated by drm_bufs.c. We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->dev, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (pci_disable_busmaster(dev->dev))
		DRM_ERROR("Request to disable bus-master failed.\n");

	return (0);
}

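/*
 * Publish the PCI bus ID string and the modesetting flag as read-only
 * sysctl nodes under whatever parent OID the caller passes in "top"
 * (for the DRM sysctl tree this typically surfaces as
 * hw.dri.<unit>.busid and hw.dri.<unit>.modesetting).
 */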
int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid;

	snprintf(dev->busid_str, sizeof(dev->busid_str),
	    "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	    dev->pci_slot, dev->pci_func);
	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);
	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);

	return (0);
}

static int
drm_device_find_capability(struct drm_device *dev, int cap)
{

	return (pci_find_cap(dev->dev, cap, NULL) == 0);
}

int
drm_pci_device_is_agp(struct drm_device *dev)
{
	if (dev->driver->device_is_agp != NULL) {
		int ret;

		/* device_is_agp returns a tristate, 0 = not AGP, 1 = definitely
		 * AGP, 2 = fall back to PCI capability
		 */
		ret = (*dev->driver->device_is_agp)(dev);
		if (ret != DRM_MIGHT_BE_AGP)
			return (ret);
	}

	return (drm_device_find_capability(dev, PCIY_AGP));
}

int
drm_pci_device_is_pcie(struct drm_device *dev)
{

	return (drm_device_find_capability(dev, PCIY_EXPRESS));
}

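/*
 * Match one DMI quirk entry against the SMBIOS strings the loader
 * exports as kernel environment variables (smbios.planar.maker and
 * smbios.planar.product). The entry matches only if every populated
 * slot matches; slots this port does not support fail the entry.
 */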
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	char *hw_vendor, *hw_prod;
	int i, slot;
	bool res;

	hw_vendor = kern_getenv("smbios.planar.maker");
	hw_prod = kern_getenv("smbios.planar.product");
	res = true;
	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
		case DMI_BOARD_VENDOR:
			if (hw_vendor != NULL &&
			    !strcmp(hw_vendor, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		case DMI_PRODUCT_NAME:
		case DMI_BOARD_NAME:
			if (hw_prod != NULL &&
			    !strcmp(hw_prod, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		default:
			res = false;
			goto out;
		}
	}
out:
	freeenv(hw_vendor);
	freeenv(hw_prod);

	return (res);
}

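/*
 * Usage sketch for a hypothetical quirk table; the array must be
 * terminated by an all-zero entry, and an optional callback can stop
 * the scan early by returning nonzero:
 *
 *	static const struct dmi_system_id mydrm_quirks[] = {
 *		{
 *			.callback = mydrm_quirk_cb,
 *			.matches = {
 *				DMI_MATCH(DMI_SYS_VENDOR, "Some Vendor"),
 *			},
 *		},
 *		{ }
 *	};
 *
 *	if (dmi_check_system(mydrm_quirks))
 *		...
 */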
bool
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	bool res;

	for (res = false, dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			res = true;
			if (dsi->callback != NULL && dsi->callback(dsi))
				break;
		}
	}
	return (res);
}

#if __OS_HAS_MTRR
int
drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags)
{
	int act;
	struct mem_range_desc mrdesc;

	mrdesc.mr_base = offset;
	mrdesc.mr_len = size;
	mrdesc.mr_flags = flags;
	act = MEMRANGE_SET_UPDATE;
	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
	return (-mem_range_attr_set(&mrdesc, &act));
}

int
drm_mtrr_del(int handle __unused, unsigned long offset, unsigned long size,
    unsigned int flags)
{
	int act;
	struct mem_range_desc mrdesc;

	mrdesc.mr_base = offset;
	mrdesc.mr_len = size;
	mrdesc.mr_flags = flags;
	act = MEMRANGE_SET_REMOVE;
	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
	return (-mem_range_attr_set(&mrdesc, &act));
}
#endif

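/*
 * Cache-flush helpers used when CPU-written buffers are handed to the
 * GPU. On x86 these map onto the pmap cache-invalidation primitives;
 * other architectures only log an error, so callers cannot rely on
 * them there.
 */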
void
drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
{

#if defined(__i386__) || defined(__amd64__)
	pmap_invalidate_cache_pages(pages, num_pages);
#else
	DRM_ERROR("drm_clflush_pages not implemented on this architecture\n");
#endif
}

void
drm_clflush_virt_range(char *addr, unsigned long length)
{

#if defined(__i386__) || defined(__amd64__)
	pmap_force_invalidate_cache_range((vm_offset_t)addr,
	    (vm_offset_t)addr + length);
#else
	DRM_ERROR("drm_clflush_virt_range not implemented on this architecture\n");
#endif
}

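/*
 * Format a buffer as hex bytes into "linebuf", inserting a newline
 * every "rowsize" bytes and a space between each "groupsize" bytes.
 * With rowsize = 4 and groupsize = 2, the bytes 01 02 03 04 05 render
 * as "0102 0304\n05". The ascii argument is accepted for Linux API
 * compatibility but ignored.
 */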
void
hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
    char *linebuf, size_t linebuflen, bool ascii __unused)
{
	int i, j, c;

	i = j = 0;

	while (i < len && (size_t)j < linebuflen) {
		c = ((const char *)buf)[i];

		if (i != 0) {
			if (i % rowsize == 0) {
				/* Newline required. */
				sprintf(linebuf + j, "\n");
				++j;
			} else if (i % groupsize == 0) {
				/* Space required. */
				sprintf(linebuf + j, " ");
				++j;
			}
		}

		/*
		 * Written as an addition rather than "j > linebuflen - 4"
		 * so the size_t arithmetic cannot wrap for tiny buffers.
		 */
		if ((size_t)j + 4 > linebuflen)
			break;

		sprintf(linebuf + j, "%02X", c);
		j += 2;

		++i;
	}

	/* Terminate with a newline if there is room for it and the NUL. */
	if ((size_t)j + 2 <= linebuflen)
		sprintf(linebuf + j, "\n");
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define	LINUX_IOCTL_DRM_MIN	0x6400
#define	LINUX_IOCTL_DRM_MAX	0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux. */
#define	LINUX_IOC_IN	IOC_OUT
#define	LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args *args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return (error);
}
#endif /* DRM_LINUX */

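/*
 * Module event handler: on load, pick up the drm.debug and drm.notyet
 * tunables, which can be set at boot, e.g. in /boot/loader.conf:
 *
 *	drm.debug="1"
 */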
static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet);
		break;
	}
	return (0);
}

static moduledata_t drm_mod = {
	"drmn",
	drm_modevent,
	0
};

DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);