FreeBSD/Linux Kernel Cross Reference
sys/dev/drm2/drm_pci.c
/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
 * \file drm_pci.c
 * \brief Functions and ioctls to manage PCI memory
 *
 * \warning These interfaces aren't stable yet.
 *
 * \todo Implement the remaining ioctls for the PCI pools.
 * \todo The wrappers here are so thin that they would be better off inlined.
 *
 * \author José Fonseca <jrfonseca@tungstengraphics.com>
 * \author Leif Delgass <ldelgass@retinalburn.net>
 */

/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>

static int drm_msi = 1;	/* Enable by default. */
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");
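/*
 * Because drm_msi is declared CTLFLAG_RDTUN it is also read as a loader
 * tunable: for example, setting "hw.drm.msi=0" in loader.conf disables MSI
 * for all drm devices before any of them attach.
 */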

/**********************************************************************/
/** \name PCI memory */
/*@{*/

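/*
 * bus_dmamap_load() callback for drm_pci_alloc(): the DMA tag below is
 * created with nsegs == 1, so the single segment's bus address is simply
 * recorded in the handle.
 */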
static void
drm_pci_busdma_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	drm_dma_handle_t *dmah = arg;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("drm_pci_busdma_callback: bad dma segment count"));
	dmah->busaddr = segs[0].ds_addr;
}

/**
 * \brief Allocate a PCI consistent memory block, for DMA.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size,
    size_t align, dma_addr_t maxaddr)
{
	drm_dma_handle_t *dmah;
	int ret;

	/* Need power-of-two alignment, so fail the allocation if it isn't. */
	if ((align & (align - 1)) != 0) {
		DRM_ERROR("drm_pci_alloc with non-power-of-two alignment %d\n",
		    (int)align);
		return NULL;
	}

	dmah = malloc(sizeof(drm_dma_handle_t), DRM_MEM_DMA, M_ZERO | M_NOWAIT);
	if (dmah == NULL)
		return NULL;

	/* Make sure we aren't holding mutexes here */
	mtx_assert(&dev->dma_lock, MA_NOTOWNED);
	if (mtx_owned(&dev->dma_lock))
		DRM_ERROR("called while holding dma_lock\n");

	ret = bus_dma_tag_create(
	    bus_get_dma_tag(dev->dev),	/* parent */
	    align, 0,			/* align, boundary */
	    maxaddr, BUS_SPACE_MAXADDR,	/* lowaddr, highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncargs */
	    size, 1, size,		/* maxsize, nsegs, maxsegsize */
	    0, NULL, NULL,		/* flags, lockfunc, lockfuncargs */
	    &dmah->tag);
	if (ret != 0) {
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	ret = bus_dmamem_alloc(dmah->tag, &dmah->vaddr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &dmah->map);
	if (ret != 0) {
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	ret = bus_dmamap_load(dmah->tag, dmah->map, dmah->vaddr, size,
	    drm_pci_busdma_callback, dmah, BUS_DMA_NOWAIT);
	if (ret != 0) {
		bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
		bus_dma_tag_destroy(dmah->tag);
		free(dmah, DRM_MEM_DMA);
		return NULL;
	}

	return dmah;
}

EXPORT_SYMBOL(drm_pci_alloc);

/**
 * \brief Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	if (dmah == NULL)
		return;

	bus_dmamap_unload(dmah->tag, dmah->map);
	bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
	bus_dma_tag_destroy(dmah->tag);
}

/**
 * \brief Free a PCI consistent memory block
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
	__drm_pci_free(dev, dmah);
	free(dmah, DRM_MEM_DMA);
}

EXPORT_SYMBOL(drm_pci_free);
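
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a driver that wants a page-aligned, DMA-coherent scratch buffer below
 * 4GB might do something like
 *
 *	drm_dma_handle_t *dmah;
 *
 *	dmah = drm_pci_alloc(dev, 64 * 1024, PAGE_SIZE, 0xffffffffUL);
 *	if (dmah == NULL)
 *		return (-ENOMEM);
 *	... use dmah->vaddr (kernel mapping) and dmah->busaddr (device address) ...
 *	drm_pci_free(dev, dmah);
 */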

static int drm_get_pci_domain(struct drm_device *dev)
{
	return dev->pci_domain;
}

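/*
 * Lazily allocate this device's interrupt resource (a legacy INTx line, or
 * the MSI message set up by drm_pci_enable_msi() below, which moves irqrid
 * to 1) and return its IRQ number; 0 is returned if allocation fails.
 */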
static int drm_pci_get_irq(struct drm_device *dev)
{

	if (dev->irqr)
		return (dev->irq);

	dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
	    &dev->irqrid, RF_SHAREABLE);
	if (!dev->irqr) {
		dev_err(dev->dev, "Failed to allocate IRQ\n");
		return (0);
	}

	dev->irq = (int) rman_get_start(dev->irqr);

	return (dev->irq);
}

static void drm_pci_free_irq(struct drm_device *dev)
{
	if (dev->irqr == NULL)
		return;

	bus_release_resource(dev->dev, SYS_RES_IRQ,
	    dev->irqrid, dev->irqr);

	dev->irqr = NULL;
	dev->irq = 0;
}

static const char *drm_pci_get_name(struct drm_device *dev)
{
	return dev->driver->name;
}

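/*
 * Build the canonical "pci:domain:bus:slot.func" busid string that
 * identifies this device to userland.
 */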
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
	int len, ret;

	master->unique_len = 40;
	master->unique_size = master->unique_len;
	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_NOWAIT);
	if (master->unique == NULL)
		return -ENOMEM;

	len = snprintf(master->unique, master->unique_len,
	    "pci:%04x:%02x:%02x.%d",
	    dev->pci_domain,
	    dev->pci_bus,
	    dev->pci_slot,
	    dev->pci_func);

	if (len >= master->unique_len) {
		DRM_ERROR("buffer overflow");
		ret = -EINVAL;
		goto err;
	} else
		master->unique_len = len;

	return 0;
err:
	return ret;
}

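/*
 * Adopt a user-supplied unique/busid string of the form "PCI:bus:slot:func"
 * (with the PCI domain folded into the upper bits of the bus number) and
 * verify that it actually names this device.
 */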
int drm_pci_set_unique(struct drm_device *dev,
		       struct drm_master *master,
		       struct drm_unique *u)
{
	int domain, bus, slot, func, ret;

	master->unique_len = u->unique_len;
	master->unique_size = u->unique_len + 1;
	master->unique = malloc(master->unique_size, DRM_MEM_DRIVER, M_WAITOK);
	if (!master->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (copy_from_user(master->unique, u->unique, master->unique_len)) {
		ret = -EFAULT;
		goto err;
	}

	master->unique[master->unique_len] = '\0';

	/* Return error if the busid submitted doesn't match the device's actual
	 * busid.
	 */
	ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
	if (ret != 3) {
		ret = -EINVAL;
		goto err;
	}

	domain = bus >> 8;
	bus &= 0xff;

	if ((domain != dev->pci_domain) ||
	    (bus != dev->pci_bus) ||
	    (slot != dev->pci_slot) ||
	    (func != dev->pci_func)) {
		ret = -EINVAL;
		goto err;
	}
	return 0;
err:
	return ret;
}

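/*
 * IRQ_BUSID ioctl backend: report this device's IRQ, but only if the
 * bus/slot/function triple supplied by the caller matches the device.
 */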
static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
	    (p->busnum & 0xff) != dev->pci_bus ||
	    p->devnum != dev->pci_slot || p->funcnum != dev->pci_func)
		return -EINVAL;

	p->irq = dev->irq;

	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
	    p->irq);
	return 0;
}

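/*
 * Probe and initialize AGP support for the device.  When AGP is present,
 * also try to cover the aperture with a write-combining MTRR; agp_mtrr
 * records whether that MTRR was added (1) or not (-1).
 */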
int drm_pci_agp_init(struct drm_device *dev)
{
	if (drm_core_has_AGP(dev)) {
		if (drm_pci_device_is_agp(dev))
			dev->agp = drm_agp_init(dev);
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
		    && (dev->agp == NULL)) {
			DRM_ERROR("Cannot initialize the agpgart module.\n");
			return -EINVAL;
		}
		if (drm_core_has_MTRR(dev)) {
			if (dev->agp && dev->agp->agp_info.ai_aperture_base != 0) {
				if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
				    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
					dev->agp->agp_mtrr = 1;
				else
					dev->agp->agp_mtrr = -1;
			}
		}
	}
	return 0;
}

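/* Bus operations handed to the generic DRM code for PCI-attached devices. */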
static struct drm_bus drm_pci_bus = {
	.bus_type = DRIVER_BUS_PCI,
	.get_irq = drm_pci_get_irq,
	.free_irq = drm_pci_free_irq,
	.get_name = drm_pci_get_name,
	.set_busid = drm_pci_set_busid,
	.set_unique = drm_pci_set_unique,
	.irq_by_busid = drm_pci_irq_by_busid,
	.agp_init = drm_pci_agp_init,
};

/**
 * Register.
 *
 * \param kdev the newbus device of the PCI device.
 * \param dev the DRM device to fill in and register.
 * \param driver the DRM driver backing the device.
 * \return zero on success or a negative number on failure.
 *
 * Attempts to get the inter-module "drm" information.  If we are first,
 * register the character device and inter-module information.
 * Try to register; if we fail to register, back out the previous work.
 */
int drm_get_pci_dev(device_t kdev, struct drm_device *dev,
    struct drm_driver *driver)
{
	int ret;

	DRM_DEBUG("\n");

	driver->bus = &drm_pci_bus;

	dev->dev = kdev;

	dev->pci_domain = pci_get_domain(dev->dev);
	dev->pci_bus = pci_get_bus(dev->dev);
	dev->pci_slot = pci_get_slot(dev->dev);
	dev->pci_func = pci_get_function(dev->dev);

	dev->pci_vendor = pci_get_vendor(dev->dev);
	dev->pci_device = pci_get_device(dev->dev);
	dev->pci_subvendor = pci_get_subvendor(dev->dev);
	dev->pci_subdevice = pci_get_subdevice(dev->dev);

	sx_xlock(&drm_global_mutex);

	if ((ret = drm_fill_in_dev(dev, driver))) {
		DRM_ERROR("Failed to fill in dev: %d\n", ret);
		goto err_g1;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
		if (ret)
			goto err_g2;
	}

	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
		goto err_g3;

	if (dev->driver->load) {
		ret = dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (ret)
			goto err_g4;
	}

	/* setup the grouping for the legacy output */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = drm_mode_group_init_legacy_group(dev,
		    &dev->primary->mode_group);
		if (ret)
			goto err_g5;
	}

#ifdef FREEBSD_NOTYET
	list_add_tail(&dev->driver_item, &driver->device_list);
#endif /* FREEBSD_NOTYET */

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
	    driver->name, driver->major, driver->minor, driver->patchlevel,
	    driver->date, device_get_nameunit(dev->dev), dev->primary->index);

	sx_xunlock(&drm_global_mutex);
	return 0;

err_g5:
	if (dev->driver->unload)
		dev->driver->unload(dev);
err_g4:
	drm_put_minor(&dev->primary);
err_g3:
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_put_minor(&dev->control);
err_g2:
	drm_cancel_fill_in_dev(dev);
err_g1:
	sx_xunlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);

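/*
 * Try to switch the device from legacy INTx to a single MSI message.
 * On success irqrid is set to 1, which is the resource id FreeBSD assigns
 * to the first MSI vector, so a later drm_pci_get_irq() picks up the MSI
 * interrupt instead of the INTx line.
 */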
int
drm_pci_enable_msi(struct drm_device *dev)
{
	int msicount, ret;

	if (!drm_msi)
		return (-ENOENT);

	msicount = pci_msi_count(dev->dev);
	DRM_DEBUG("MSI count = %d\n", msicount);
	if (msicount > 1)
		msicount = 1;

	ret = pci_alloc_msi(dev->dev, &msicount);
	if (ret == 0) {
		DRM_INFO("MSI enabled %d message(s)\n", msicount);
		dev->msi_enabled = 1;
		dev->irqrid = 1;
	}

	return (-ret);
}

void
drm_pci_disable_msi(struct drm_device *dev)
{

	if (!dev->msi_enabled)
		return;

	pci_release_msi(dev->dev);
	dev->msi_enabled = 0;
	dev->irqrid = 0;
}

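/*
 * Report the PCIe link speeds supported by the upstream PCIe port as a
 * mask of DRM_PCIE_SPEED_* flags, preferring the Link Capabilities 2
 * register (PCIe 3.0) and falling back to the original Link Capabilities
 * field on older devices.
 */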
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
	device_t root;
	int pos;
	u32 lnkcap = 0, lnkcap2 = 0;

	*mask = 0;
	if (!drm_pci_device_is_pcie(dev))
		return -EINVAL;

	root =
	    device_get_parent( /* pcib             */
	    device_get_parent( /*  `-- pci         */
	    device_get_parent( /*       `-- vgapci */
	    dev->dev)));       /*            `-- drmn */

	pos = 0;
	pci_find_cap(root, PCIY_EXPRESS, &pos);
	if (!pos)
		return -EINVAL;

	/* we've been informed that VIA and ServerWorks don't make the cut */
	if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
	    pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
		return -EINVAL;

	lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
	lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);

	lnkcap &= PCIEM_LINK_CAP_MAX_SPEED;
	lnkcap2 &= 0xfe;

#define	PCI_EXP_LNKCAP2_SLS_2_5GB 0x02	/* Supported Link Speed 2.5GT/s */
#define	PCI_EXP_LNKCAP2_SLS_5_0GB 0x04	/* Supported Link Speed 5.0GT/s */
#define	PCI_EXP_LNKCAP2_SLS_8_0GB 0x08	/* Supported Link Speed 8.0GT/s */

	if (lnkcap2) { /* PCIe Gen 3.0 */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*mask |= DRM_PCIE_SPEED_50;
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*mask |= DRM_PCIE_SPEED_80;
	} else {
		if (lnkcap & 1)
			*mask |= DRM_PCIE_SPEED_25;
		if (lnkcap & 2)
			*mask |= DRM_PCIE_SPEED_50;
	}

	DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n",
	    pci_get_vendor(root), pci_get_device(root), lnkcap, lnkcap2);
	return 0;
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);