#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>

#include <dev/agp/agpreg.h>
#include <dev/pci/pcireg.h>

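/*
 * Malloc types for the various DRM allocations; the short names below
 * are what shows up in "vmstat -m".
 */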
MALLOC_DEFINE(DRM_MEM_DMA, "drm_dma", "DRM DMA Data Structures");
MALLOC_DEFINE(DRM_MEM_SAREA, "drm_sarea", "DRM SAREA Data Structures");
MALLOC_DEFINE(DRM_MEM_DRIVER, "drm_driver", "DRM DRIVER Data Structures");
MALLOC_DEFINE(DRM_MEM_MAGIC, "drm_magic", "DRM MAGIC Data Structures");
MALLOC_DEFINE(DRM_MEM_MINOR, "drm_minor", "DRM MINOR Data Structures");
MALLOC_DEFINE(DRM_MEM_IOCTLS, "drm_ioctls", "DRM IOCTL Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPS, "drm_maps", "DRM MAP Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFS, "drm_bufs", "DRM BUFFER Data Structures");
MALLOC_DEFINE(DRM_MEM_SEGS, "drm_segs", "DRM SEGMENTS Data Structures");
MALLOC_DEFINE(DRM_MEM_PAGES, "drm_pages", "DRM PAGES Data Structures");
MALLOC_DEFINE(DRM_MEM_FILES, "drm_files", "DRM FILE Data Structures");
MALLOC_DEFINE(DRM_MEM_QUEUES, "drm_queues", "DRM QUEUE Data Structures");
MALLOC_DEFINE(DRM_MEM_CMDS, "drm_cmds", "DRM COMMAND Data Structures");
MALLOC_DEFINE(DRM_MEM_MAPPINGS, "drm_mapping", "DRM MAPPING Data Structures");
MALLOC_DEFINE(DRM_MEM_BUFLISTS, "drm_buflists", "DRM BUFLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_AGPLISTS, "drm_agplists", "DRM AGPLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_CTXBITMAP, "drm_ctxbitmap",
    "DRM CTXBITMAP Data Structures");
MALLOC_DEFINE(DRM_MEM_SGLISTS, "drm_sglists", "DRM SGLISTS Data Structures");
MALLOC_DEFINE(DRM_MEM_MM, "drm_sman", "DRM MEMORY MANAGER Data Structures");
MALLOC_DEFINE(DRM_MEM_HASHTAB, "drm_hashtab", "DRM HASHTABLE Data Structures");
MALLOC_DEFINE(DRM_MEM_KMS, "drm_kms", "DRM KMS Data Structures");
MALLOC_DEFINE(DRM_MEM_VBLANK, "drm_vblank", "DRM VBLANK Handling Data");

const char *fb_mode_option = NULL;

#define	NSEC_PER_USEC	1000L
#define	NSEC_PER_SEC	1000000000L

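/*
 * Linux-compatible time conversions.  For example, a timeval of
 * { .tv_sec = 1, .tv_usec = 500000 } converts to 1500000000 ns.
 */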
int64_t
timeval_to_ns(const struct timeval *tv)
{
	return ((int64_t)tv->tv_sec * NSEC_PER_SEC) +
	    tv->tv_usec * NSEC_PER_USEC;
}

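/*
 * The inverse conversion rounds toward minus infinity, folding a
 * negative remainder back into the previous second: -1 ns becomes
 * { .tv_sec = -1, .tv_usec = 999999 }.
 */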
struct timeval
ns_to_timeval(const int64_t nsec)
{
	struct timeval tv;
	long rem;

	if (nsec == 0) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
		return (tv);
	}

	tv.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {
		tv.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	tv.tv_usec = rem / NSEC_PER_USEC;
	return (tv);
}

/* Copied from OFED. */
unsigned long drm_linux_timer_hz_mask;

static void
drm_linux_timer_init(void *arg)
{

	/*
	 * Compute an internal HZ value which can divide 2**32 to
	 * avoid timer rounding problems when the tick value wraps
	 * around 2**32:
	 */
	drm_linux_timer_hz_mask = 1;
	while (drm_linux_timer_hz_mask < (unsigned long)hz)
		drm_linux_timer_hz_mask *= 2;
	drm_linux_timer_hz_mask--;
}
SYSINIT(drm_linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, drm_linux_timer_init,
    NULL);

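/*
 * Scan a driver-supplied PCI ID table.  The table is terminated by an
 * entry with a zero vendor ID, and a zero device ID in an entry acts
 * as a wildcard for that vendor.
 */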
static const drm_pci_id_list_t *
drm_find_description(int vendor, int device, const drm_pci_id_list_t *idlist)
{
	int i;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return (&idlist[i]);
		}
	}
	return (NULL);
}

/*
 * drm_probe_helper: called by a driver at the end of its probe
 * method.
 */
int
drm_probe_helper(device_t kdev, const drm_pci_id_list_t *idlist)
{
	const drm_pci_id_list_t *id_entry;
	int vendor, device;

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);

	if (pci_get_class(kdev) != PCIC_DISPLAY ||
	    (pci_get_subclass(kdev) != PCIS_DISPLAY_VGA &&
	    pci_get_subclass(kdev) != PCIS_DISPLAY_OTHER))
		return (-ENXIO);

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (device_get_desc(kdev) == NULL) {
			DRM_DEBUG("%s desc: %s\n",
			    device_get_nameunit(kdev), id_entry->name);
			device_set_desc(kdev, id_entry->name);
		}
#if !defined(__arm__)
		DRM_OBSOLETE(kdev);
#endif
		return (-BUS_PROBE_GENERIC);
	}

	return (-ENXIO);
}

/*
 * drm_attach_helper: called by a driver at the end of its attach
 * method.
 */
int
drm_attach_helper(device_t kdev, const drm_pci_id_list_t *idlist,
    struct drm_driver *driver)
{
	struct drm_device *dev;
	int vendor, device;
	int ret;

	dev = device_get_softc(kdev);

	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);
	dev->id_entry = drm_find_description(vendor, device, idlist);

	ret = drm_get_pci_dev(kdev, dev, driver);

	return (ret);
}

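/*
 * Generic suspend/resume glue for DRM drivers.  The driver hooks use
 * the Linux convention of returning a negative errno, so their result
 * is negated into the positive error code that newbus expects.
 */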
int
drm_generic_suspend(device_t kdev)
{
	struct drm_device *dev;
	int error;

	DRM_DEBUG_KMS("Starting suspend\n");

	dev = device_get_softc(kdev);
	if (dev->driver->suspend) {
		pm_message_t state;

		state.event = PM_EVENT_SUSPEND;
		error = -dev->driver->suspend(dev, state);
		if (error)
			goto out;
	}

	error = bus_generic_suspend(kdev);

out:
	DRM_DEBUG_KMS("Finished suspend: %d\n", error);

	return (error);
}

int
drm_generic_resume(device_t kdev)
{
	struct drm_device *dev;
	int error;

	DRM_DEBUG_KMS("Starting resume\n");

	dev = device_get_softc(kdev);
	if (dev->driver->resume) {
		error = -dev->driver->resume(dev);
		if (error)
			goto out;
	}

	error = bus_generic_resume(kdev);

out:
	DRM_DEBUG_KMS("Finished resume: %d\n", error);

	return (error);
}

int
drm_generic_detach(device_t kdev)
{
	struct drm_device *dev;
	int i;

	dev = device_get_softc(kdev);

	drm_put_dev(dev);

	/*
	 * Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive
	 * (between lastclose and firstopen or unload) because these aren't
	 * actually taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->dev, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (pci_disable_busmaster(dev->dev))
		DRM_ERROR("Request to disable bus-master failed.\n");

	return (0);
}

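/*
 * Publish the bus ID (formatted like "pci:0000:01:00.0") and whether
 * the driver runs with kernel modesetting as read-only sysctl nodes
 * under the device's sysctl tree.
 */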
int
drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
    struct sysctl_oid *top)
{
	struct sysctl_oid *oid;

	snprintf(dev->busid_str, sizeof(dev->busid_str),
	    "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
	    dev->pci_slot, dev->pci_func);
	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
	    CTLFLAG_RD, dev->busid_str, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);
	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
	if (oid == NULL)
		return (-ENOMEM);

	return (0);
}

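/*
 * pci_find_cap() returns 0 when the device has the capability, so the
 * comparison below yields a boolean "capability present" result.
 */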
static int
drm_device_find_capability(struct drm_device *dev, int cap)
{

	return (pci_find_cap(dev->dev, cap, NULL) == 0);
}

int
drm_pci_device_is_agp(struct drm_device *dev)
{
	if (dev->driver->device_is_agp != NULL) {
		int ret;

		/*
		 * device_is_agp returns a tristate: 0 = not AGP,
		 * 1 = definitely AGP, 2 = fall back to the PCI capability.
		 */
		ret = (*dev->driver->device_is_agp)(dev);
		if (ret != DRM_MIGHT_BE_AGP)
			return (ret);
	}

	return (drm_device_find_capability(dev, PCIY_AGP));
}

int
drm_pci_device_is_pcie(struct drm_device *dev)
{

	return (drm_device_find_capability(dev, PCIY_EXPRESS));
}

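/*
 * Match one DMI table entry against the board vendor/product strings
 * that the loader exports as the smbios.planar.* kenv variables.
 * Every populated match slot must succeed for the entry to match.
 */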
static bool
dmi_found(const struct dmi_system_id *dsi)
{
	char *hw_vendor, *hw_prod;
	int i, slot;
	bool res;

	hw_vendor = kern_getenv("smbios.planar.maker");
	hw_prod = kern_getenv("smbios.planar.product");
	res = true;
	for (i = 0; i < nitems(dsi->matches); i++) {
		slot = dsi->matches[i].slot;
		switch (slot) {
		case DMI_NONE:
			break;
		case DMI_SYS_VENDOR:
		case DMI_BOARD_VENDOR:
			if (hw_vendor != NULL &&
			    !strcmp(hw_vendor, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		case DMI_PRODUCT_NAME:
		case DMI_BOARD_NAME:
			if (hw_prod != NULL &&
			    !strcmp(hw_prod, dsi->matches[i].substr)) {
				break;
			} else {
				res = false;
				goto out;
			}
		default:
			res = false;
			goto out;
		}
	}
out:
	freeenv(hw_vendor);
	freeenv(hw_prod);

	return (res);
}

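/*
 * Walk a DMI quirk table until the terminating entry (one whose first
 * match slot is zero).  An entry matches only if dmi_found() accepts
 * all of its populated match slots; a callback returning nonzero stops
 * the scan early.  A minimal, purely hypothetical table (the names are
 * illustrative only) might look like:
 *
 *	static const struct dmi_system_id acme_quirks[] = {
 *		{
 *			.matches = {
 *				{ .slot = DMI_BOARD_VENDOR, .substr = "ACME" },
 *				{ .slot = DMI_BOARD_NAME, .substr = "Anvil1000" },
 *			},
 *		},
 *		{ }	(terminating entry)
 *	};
 *
 *	if (dmi_check_system(acme_quirks))
 *		(apply the workaround)
 */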
bool
dmi_check_system(const struct dmi_system_id *sysid)
{
	const struct dmi_system_id *dsi;
	bool res;

	for (res = false, dsi = sysid; dsi->matches[0].slot != 0; dsi++) {
		if (dmi_found(dsi)) {
			res = true;
			if (dsi->callback != NULL && dsi->callback(dsi))
				break;
		}
	}
	return (res);
}

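/*
 * MTRR handling sits on top of the FreeBSD memory-range framework:
 * both helpers describe the range in a mem_range_desc, apply it with
 * mem_range_attr_set(), and negate the result into the Linux-style
 * negative errno the DRM callers expect.
 */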
#if __OS_HAS_MTRR
int
drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags)
{
	int act;
	struct mem_range_desc mrdesc;

	mrdesc.mr_base = offset;
	mrdesc.mr_len = size;
	mrdesc.mr_flags = flags;
	act = MEMRANGE_SET_UPDATE;
	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
	return (-mem_range_attr_set(&mrdesc, &act));
}

int
drm_mtrr_del(int handle __unused, unsigned long offset, unsigned long size,
    unsigned int flags)
{
	int act;
	struct mem_range_desc mrdesc;

	mrdesc.mr_base = offset;
	mrdesc.mr_len = size;
	mrdesc.mr_flags = flags;
	act = MEMRANGE_SET_REMOVE;
	strlcpy(mrdesc.mr_owner, "drm", sizeof(mrdesc.mr_owner));
	return (-mem_range_attr_set(&mrdesc, &act));
}
#endif

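/*
 * Cache-flush helpers.  Only x86 provides the pmap primitives these
 * rely on; other architectures log an error rather than silently
 * pretending the flush happened.
 */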
void
drm_clflush_pages(vm_page_t *pages, unsigned long num_pages)
{

#if defined(__i386__) || defined(__amd64__)
	pmap_invalidate_cache_pages(pages, num_pages);
#else
	DRM_ERROR("drm_clflush_pages not implemented on this architecture");
#endif
}

void
drm_clflush_virt_range(char *addr, unsigned long length)
{

#if defined(__i386__) || defined(__amd64__)
	pmap_force_invalidate_cache_range((vm_offset_t)addr,
	    (vm_offset_t)addr + length);
#else
	DRM_ERROR("drm_clflush_virt_range not implemented on this architecture");
#endif
}

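/*
 * Format "len" bytes of "buf" as uppercase hex into "linebuf", adding
 * a space after every "groupsize" bytes and a newline after every
 * "rowsize" bytes, e.g. "A1B2C3D4 E5F6" for a groupsize of 4.  The
 * output is truncated rather than overflowed if "linebuflen" is too
 * small.
 */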
void
hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
    char *linebuf, size_t linebuflen, bool ascii __unused)
{
	size_t i, j;
	int c;

	i = j = 0;

	/*
	 * Each iteration writes at most a one-character separator plus two
	 * hex digits and a NUL, so stop while four bytes still remain.
	 */
	while (i < len && j + 3 < linebuflen) {
		/* Read as unsigned to avoid sign extension under %02X. */
		c = ((const u_char *)buf)[i];

		if (i != 0) {
			if (i % rowsize == 0) {
				/* Newline required. */
				sprintf(linebuf + j, "\n");
				++j;
			} else if (i % groupsize == 0) {
				/* Space required. */
				sprintf(linebuf + j, " ");
				++j;
			}
		}

		sprintf(linebuf + j, "%02X", c);
		j += 2;

		++i;
	}

	if (j + 1 < linebuflen)
		sprintf(linebuf + j, "\n");
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define	LINUX_IOCTL_DRM_MIN	0x6400
#define	LINUX_IOCTL_DRM_MAX	0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

/* The bits for in/out are switched on Linux. */
#define	LINUX_IOC_IN	IOC_OUT
#define	LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args *args)
{
	int error;
	int cmd = args->cmd;

	/* Translate the Linux direction bits into their native positions. */
	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return (error);
}
#endif /* DRM_LINUX */

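/*
 * Module event handler: on load, pick up the drm.debug and drm.notyet
 * tunables from the kernel environment.
 */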
static int
drm_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet);
		break;
	}
	return (0);
}

static moduledata_t drm_mod = {
	"drmn",
	drm_modevent,
	0
};

DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(drmn, 1);
MODULE_DEPEND(drmn, agp, 1, 1, 1);
MODULE_DEPEND(drmn, pci, 1, 1, 1);
MODULE_DEPEND(drmn, mem, 1, 1, 1);
MODULE_DEPEND(drmn, iicbus, 1, 1, 1);