FreeBSD/Linux Kernel Cross Reference
sys/pci/agp.c
/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bus.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/agpio.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <pci/agppriv.h>
#include <pci/agpvar.h>
#include <pci/agpreg.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

MODULE_VERSION(agp, 1);

MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");

/* agp_drv.c */
static d_open_t agp_open;
static d_close_t agp_close;
static d_ioctl_t agp_ioctl;
static d_mmap_t agp_mmap;

static struct cdevsw agp_cdevsw = {
	.d_version = D_VERSION,
	.d_flags = D_NEEDGIANT,
	.d_open = agp_open,
	.d_close = agp_close,
	.d_ioctl = agp_ioctl,
	.d_mmap = agp_mmap,
	.d_name = "agp",
};

static devclass_t agp_devclass;
#define KDEV2DEV(kdev)	devclass_get_device(agp_devclass, minor(kdev))

/* Helper functions for implementing chipset mini drivers. */

void
agp_flush_cache(void)
{
#if defined(__i386__) || defined(__amd64__)
	wbinvd();
#endif
#ifdef __alpha__
	/* FIXME: This is most likely not correct as it doesn't flush CPU
	 * write caches, but we don't have a facility to do that and
	 * this is all linux does, too */
	alpha_mb();
#endif
}

u_int8_t
agp_find_caps(device_t dev)
{
	int capreg;

	if (pci_find_extcap(dev, PCIY_AGP, &capreg) != 0)
		capreg = 0;
	return (capreg);
}
111
112 /*
113 * Find an AGP display device (if any).
114 */
115 static device_t
116 agp_find_display(void)
117 {
118 devclass_t pci = devclass_find("pci");
119 device_t bus, dev = 0;
120 device_t *kids;
121 int busnum, numkids, i;
122
123 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
124 bus = devclass_get_device(pci, busnum);
125 if (!bus)
126 continue;
127 device_get_children(bus, &kids, &numkids);
128 for (i = 0; i < numkids; i++) {
129 dev = kids[i];
130 if (pci_get_class(dev) == PCIC_DISPLAY
131 && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
132 if (agp_find_caps(dev)) {
133 free(kids, M_TEMP);
134 return dev;
135 }
136
137 }
138 free(kids, M_TEMP);
139 }
140
141 return 0;
142 }
143
struct agp_gatt *
agp_alloc_gatt(device_t dev)
{
	u_int32_t apsize = AGP_GET_APERTURE(dev);
	u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
	struct agp_gatt *gatt;

	if (bootverbose)
		device_printf(dev,
			      "allocating GATT for aperture of size %dM\n",
			      apsize / (1024*1024));

	if (entries == 0) {
		device_printf(dev, "bad aperture size\n");
		return NULL;
	}

	gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
	if (!gatt)
		return 0;

	gatt->ag_entries = entries;
	gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
					0, ~0, PAGE_SIZE, 0);
	if (!gatt->ag_virtual) {
		if (bootverbose)
			device_printf(dev, "contiguous allocation failed\n");
		free(gatt, M_AGP);
		return 0;
	}
	bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
	gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
	agp_flush_cache();

	return gatt;
}

void
agp_free_gatt(struct agp_gatt *gatt)
{
	contigfree(gatt->ag_virtual,
		   gatt->ag_entries * sizeof(u_int32_t), M_AGP);
	free(gatt, M_AGP);
}
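
/*
 * A chipset mini driver typically pairs the two helpers above roughly
 * as in this illustrative sketch; CHIP_ATTBASE is a hypothetical
 * chipset register, not something defined in this file:
 *
 *	struct agp_gatt *gatt;
 *
 *	gatt = agp_alloc_gatt(dev);
 *	if (gatt == NULL)
 *		return ENOMEM;
 *	pci_write_config(dev, CHIP_ATTBASE, gatt->ag_physical, 4);
 *	...
 *	agp_free_gatt(gatt);
 */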

static int agp_max[][2] = {
	{0,	0},
	{32,	4},
	{64,	28},
	{128,	96},
	{256,	204},
	{512,	440},
	{1024,	942},
	{2048,	1920},
	{4096,	3932}
};
#define agp_max_size	(sizeof(agp_max) / sizeof(agp_max[0]))
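
/*
 * Illustrative example of the table above: a machine with 100M of
 * physical memory falls between the {64, 28} and {128, 96} rows, so
 * the scan in agp_generic_attach() stops at {128, 96} and AGP
 * allocations are capped at 96M.
 */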

int
agp_generic_attach(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	int rid, memsize, i;

	/*
	 * Find and map the aperture.
	 */
	rid = AGP_APBASE;
	sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->as_aperture)
		return ENOMEM;

	/*
	 * Work out an upper bound for agp memory allocation. This
	 * uses a heuristic table from the Linux driver.
	 */
	memsize = ptoa(Maxmem) >> 20;
	for (i = 0; i < agp_max_size; i++) {
		if (memsize <= agp_max[i][0])
			break;
	}
	if (i == agp_max_size)
		i = agp_max_size - 1;
	sc->as_maxmem = agp_max[i][1] << 20U;

	/*
	 * The lock is used to prevent re-entry to
	 * agp_generic_bind_memory() since that function can sleep.
	 */
	mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);

	/*
	 * Initialise stuff for the userland device.
	 */
	agp_devclass = devclass_find("agp");
	TAILQ_INIT(&sc->as_memory);
	sc->as_nextid = 1;

	sc->as_devnode = make_dev(&agp_cdevsw,
				  device_get_unit(dev),
				  UID_ROOT,
				  GID_WHEEL,
				  0600,
				  "agpgart");

	return 0;
}
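
/*
 * A chipset attach routine might layer on the generic helper roughly
 * as in this illustrative sketch (the agp_foo_* names are
 * hypothetical):
 *
 *	static int
 *	agp_foo_attach(device_t dev)
 *	{
 *		int error;
 *
 *		if ((error = agp_generic_attach(dev)) != 0)
 *			return error;
 *		... program chipset-specific GATT registers ...
 *		return 0;
 *	}
 */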

void
agp_free_cdev(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	destroy_dev(sc->as_devnode);
}

void
agp_free_res(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);

	bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
	mtx_destroy(&sc->as_lock);
	agp_flush_cache();
}

int
agp_generic_detach(device_t dev)
{

	agp_free_cdev(dev);
	agp_free_res(dev);
	return 0;
}

/*
 * This does the enable logic for v3, with the same topology
 * restrictions as in place for v2 -- one bus, one device on the bus.
 */
static int
agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate, arqsz, cal;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/*
	 * ARQSZ - Set the value to the maximum one.
	 * Don't allow the mode register to override values.
	 */
	arqsz = AGP_MODE_GET_ARQSZ(mode);
	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
		arqsz = AGP_MODE_GET_ARQSZ(mstatus);

	/* Calibration cycle - don't allow override by mode register */
	cal = AGP_MODE_GET_CAL(tstatus);
	if (AGP_MODE_GET_CAL(mstatus) < cal)
		cal = AGP_MODE_GET_CAL(mstatus);

	/* SBA must be supported for AGP v3. */
	sba = 1;

	/* Set FW if all three support it. */
	fw = (AGP_MODE_GET_FW(tstatus)
	       & AGP_MODE_GET_FW(mstatus)
	       & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V3_RATE_8x)
		rate = AGP_MODE_V3_RATE_8x;
	else
		rate = AGP_MODE_V3_RATE_4x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);

	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_ARQSZ(command, arqsz);
	command = AGP_MODE_SET_CAL(command, cal);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

static int
agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
{
	u_int32_t tstatus, mstatus;
	u_int32_t command;
	int rq, sba, fw, rate;

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/* Set RQ to the min of mode, tstatus and mstatus */
	rq = AGP_MODE_GET_RQ(mode);
	if (AGP_MODE_GET_RQ(tstatus) < rq)
		rq = AGP_MODE_GET_RQ(tstatus);
	if (AGP_MODE_GET_RQ(mstatus) < rq)
		rq = AGP_MODE_GET_RQ(mstatus);

	/* Set SBA if all three can deal with SBA */
	sba = (AGP_MODE_GET_SBA(tstatus)
	       & AGP_MODE_GET_SBA(mstatus)
	       & AGP_MODE_GET_SBA(mode));

	/* Similar for FW */
	fw = (AGP_MODE_GET_FW(tstatus)
	       & AGP_MODE_GET_FW(mstatus)
	       & AGP_MODE_GET_FW(mode));

	/* Figure out the max rate */
	rate = (AGP_MODE_GET_RATE(tstatus)
		& AGP_MODE_GET_RATE(mstatus)
		& AGP_MODE_GET_RATE(mode));
	if (rate & AGP_MODE_V2_RATE_4x)
		rate = AGP_MODE_V2_RATE_4x;
	else if (rate & AGP_MODE_V2_RATE_2x)
		rate = AGP_MODE_V2_RATE_2x;
	else
		rate = AGP_MODE_V2_RATE_1x;
	if (bootverbose)
		device_printf(dev, "Setting AGP v2 mode %d\n", rate);

	/* Construct the new mode word and tell the hardware */
	command = AGP_MODE_SET_RQ(0, rq);
	command = AGP_MODE_SET_SBA(command, sba);
	command = AGP_MODE_SET_FW(command, fw);
	command = AGP_MODE_SET_RATE(command, rate);
	command = AGP_MODE_SET_AGP(command, 1);
	pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
	pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);

	return 0;
}

int
agp_generic_enable(device_t dev, u_int32_t mode)
{
	device_t mdev = agp_find_display();
	u_int32_t tstatus, mstatus;

	if (!mdev) {
		AGP_DPF("can't find display\n");
		return ENXIO;
	}

	tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);

	/*
	 * Check display and bridge for AGP v3 support.  AGP v3 allows
	 * more variety in topology than v2, e.g. multiple AGP devices
	 * attached to one bridge, or multiple AGP bridges in one
	 * system.  This doesn't attempt to address those situations,
	 * but should work fine for a classic single AGP slot system
	 * with AGP v3.
	 */
	if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
		return (agp_v3_enable(dev, mdev, mode));
	else
		return (agp_v2_enable(dev, mdev, mode));
}

struct agp_memory *
agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	if ((size & (AGP_PAGE_SIZE - 1)) != 0)
		return 0;

	if (sc->as_allocated + size > sc->as_maxmem)
		return 0;

	if (type != 0) {
		printf("agp_generic_alloc_memory: unsupported type %d\n",
		       type);
		return 0;
	}

	mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
	mem->am_id = sc->as_nextid++;
	mem->am_size = size;
	mem->am_type = 0;
	mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
	mem->am_physical = 0;
	mem->am_offset = 0;
	mem->am_is_bound = 0;
	TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
	sc->as_allocated += size;

	return mem;
}

int
agp_generic_free_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (mem->am_is_bound)
		return EBUSY;

	sc->as_allocated -= mem->am_size;
	TAILQ_REMOVE(&sc->as_memory, mem, am_link);
	vm_object_deallocate(mem->am_obj);
	free(mem, M_AGP);
	return 0;
}

int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
			vm_offset_t offset)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_offset_t i, j, k;
	vm_page_t m;
	int error;

	/* Do some sanity checks first. */
	if ((offset & (AGP_PAGE_SIZE - 1)) != 0 ||
	    offset + mem->am_size > AGP_GET_APERTURE(dev)) {
		device_printf(dev, "binding memory at bad offset %#x\n",
		    (int)offset);
		return EINVAL;
	}

	/*
	 * Allocate the pages early, before acquiring the lock,
	 * because vm_page_grab() used with VM_ALLOC_RETRY may
	 * block and we can't hold a mutex while blocking.
	 */
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		/*
		 * Find a page from the object and wire it
		 * down. This page will be mapped using one or more
		 * entries in the GATT (assuming that PAGE_SIZE >=
		 * AGP_PAGE_SIZE). If this is the first call to bind,
		 * the pages will be allocated and zeroed.
		 */
		m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
		    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	mtx_lock(&sc->as_lock);

	if (mem->am_is_bound) {
		device_printf(dev, "memory already bound\n");
		error = EINVAL;
		VM_OBJECT_LOCK(mem->am_obj);
		goto bad;
	}

	/*
	 * Bind the individual pages and flush the chipset's
	 * TLB.
	 *
	 * XXX Presumably, this needs to be the pci address on alpha
	 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
	 * alpha AGP hardware to check.
	 */
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));

		/*
		 * Install entries in the GATT, making sure that if
		 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
		 * aligned to PAGE_SIZE, we don't modify too many GATT
		 * entries.
		 */
		for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
		     j += AGP_PAGE_SIZE) {
			vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
			AGP_DPF("binding offset %#x to pa %#x\n",
				offset + i + j, pa);
			error = AGP_BIND_PAGE(dev, offset + i + j, pa);
			if (error) {
				/*
				 * Bail out. Reverse all the mappings
				 * and unwire the pages.
				 */
				vm_page_lock_queues();
				vm_page_wakeup(m);
				vm_page_unlock_queues();
				for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
					AGP_UNBIND_PAGE(dev, offset + k);
				goto bad;
			}
		}
		vm_page_lock_queues();
		vm_page_wakeup(m);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	/*
	 * Flush the cpu cache since we are providing a new mapping
	 * for these pages.
	 */
	agp_flush_cache();

	/*
	 * Make sure the chipset gets the new mappings.
	 */
	AGP_FLUSH_TLB(dev);

	mem->am_offset = offset;
	mem->am_is_bound = 1;

	mtx_unlock(&sc->as_lock);

	return 0;
bad:
	mtx_unlock(&sc->as_lock);
	VM_OBJECT_LOCK_ASSERT(mem->am_obj, MA_OWNED);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	return error;
}

int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
	struct agp_softc *sc = device_get_softc(dev);
	vm_page_t m;
	int i;

	mtx_lock(&sc->as_lock);

	if (!mem->am_is_bound) {
		device_printf(dev, "memory is not bound\n");
		mtx_unlock(&sc->as_lock);
		return EINVAL;
	}

	/*
	 * Unbind the individual pages and flush the chipset's
	 * TLB. Unwire the pages so they can be swapped.
	 */
	for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
		AGP_UNBIND_PAGE(dev, mem->am_offset + i);
	VM_OBJECT_LOCK(mem->am_obj);
	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
		m = vm_page_lookup(mem->am_obj, atop(i));
		vm_page_lock_queues();
		vm_page_unwire(m, 0);
		vm_page_unlock_queues();
	}
	VM_OBJECT_UNLOCK(mem->am_obj);

	agp_flush_cache();
	AGP_FLUSH_TLB(dev);

	mem->am_offset = 0;
	mem->am_is_bound = 0;

	mtx_unlock(&sc->as_lock);

	return 0;
}

/* Helper functions for implementing user/kernel api */

static int
agp_acquire_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state != AGP_ACQUIRE_FREE)
		return EBUSY;
	sc->as_state = state;

	return 0;
}

static int
agp_release_helper(device_t dev, enum agp_acquire_state state)
{
	struct agp_softc *sc = device_get_softc(dev);

	if (sc->as_state == AGP_ACQUIRE_FREE)
		return 0;

	if (sc->as_state != state)
		return EBUSY;

	sc->as_state = AGP_ACQUIRE_FREE;
	return 0;
}

static struct agp_memory *
agp_find_memory(device_t dev, int id)
{
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	AGP_DPF("searching for memory block %d\n", id);
	TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
		AGP_DPF("considering memory block %d\n", mem->am_id);
		if (mem->am_id == id)
			return mem;
	}
	return 0;
}

/* Implementation of the userland ioctl api */

static int
agp_info_user(device_t dev, agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	bzero(info, sizeof *info);
	info->bridge_id = pci_get_devid(dev);
	info->agp_mode =
	    pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->aper_base = rman_get_start(sc->as_aperture);
	info->aper_size = AGP_GET_APERTURE(dev) >> 20;
	info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
	info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;

	return 0;
}

static int
agp_setup_user(device_t dev, agp_setup *setup)
{
	return AGP_ENABLE(dev, setup->agp_mode);
}

static int
agp_allocate_user(device_t dev, agp_allocate *alloc)
{
	struct agp_memory *mem;

	mem = AGP_ALLOC_MEMORY(dev,
			       alloc->type,
			       alloc->pg_count << AGP_PAGE_SHIFT);
	if (mem) {
		alloc->key = mem->am_id;
		alloc->physical = mem->am_physical;
		return 0;
	} else {
		return ENOMEM;
	}
}

static int
agp_deallocate_user(device_t dev, int id)
{
	struct agp_memory *mem = agp_find_memory(dev, id);

	if (mem) {
		AGP_FREE_MEMORY(dev, mem);
		return 0;
	} else {
		return ENOENT;
	}
}

static int
agp_bind_user(device_t dev, agp_bind *bind)
{
	struct agp_memory *mem = agp_find_memory(dev, bind->key);

	if (!mem)
		return ENOENT;

	return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
}

static int
agp_unbind_user(device_t dev, agp_unbind *unbind)
{
	struct agp_memory *mem = agp_find_memory(dev, unbind->key);

	if (!mem)
		return ENOENT;

	return AGP_UNBIND_MEMORY(dev, mem);
}

static int
agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (!sc->as_isopen) {
		sc->as_isopen = 1;
		device_busy(dev);
	}

	return 0;
}

static int
agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);
	struct agp_memory *mem;

	/*
	 * Clear the GATT and force release on last close
	 */
	while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
		if (mem->am_is_bound)
			AGP_UNBIND_MEMORY(dev, mem);
		AGP_FREE_MEMORY(dev, mem);
	}
	if (sc->as_state == AGP_ACQUIRE_USER)
		agp_release_helper(dev, AGP_ACQUIRE_USER);
	sc->as_isopen = 0;
	device_unbusy(dev);

	return 0;
}

static int
agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	device_t dev = KDEV2DEV(kdev);

	switch (cmd) {
	case AGPIOC_INFO:
		return agp_info_user(dev, (agp_info *) data);

	case AGPIOC_ACQUIRE:
		return agp_acquire_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_RELEASE:
		return agp_release_helper(dev, AGP_ACQUIRE_USER);

	case AGPIOC_SETUP:
		return agp_setup_user(dev, (agp_setup *)data);

	case AGPIOC_ALLOCATE:
		return agp_allocate_user(dev, (agp_allocate *)data);

	case AGPIOC_DEALLOCATE:
		return agp_deallocate_user(dev, *(int *) data);

	case AGPIOC_BIND:
		return agp_bind_user(dev, (agp_bind *)data);

	case AGPIOC_UNBIND:
		return agp_unbind_user(dev, (agp_unbind *)data);
	}

	return EINVAL;
}

static int
agp_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
{
	device_t dev = KDEV2DEV(kdev);
	struct agp_softc *sc = device_get_softc(dev);

	if (offset >= AGP_GET_APERTURE(dev))
		return -1;
	*paddr = rman_get_start(sc->as_aperture) + offset;
	return 0;
}

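/*
 * A minimal userland sketch of driving the ioctl interface above
 * (illustrative; error handling omitted and the allocation size is
 * an arbitrary example):
 *
 *	int fd = open("/dev/agpgart", O_RDWR);
 *	agp_info info;
 *	agp_setup setup;
 *	agp_allocate alloc;
 *	agp_bind bind;
 *
 *	ioctl(fd, AGPIOC_ACQUIRE, 0);
 *	ioctl(fd, AGPIOC_INFO, &info);
 *	setup.agp_mode = info.agp_mode;
 *	ioctl(fd, AGPIOC_SETUP, &setup);
 *	alloc.type = 0;
 *	alloc.pg_count = 16;
 *	ioctl(fd, AGPIOC_ALLOCATE, &alloc);
 *	bind.key = alloc.key;
 *	bind.pg_start = 0;
 *	ioctl(fd, AGPIOC_BIND, &bind);
 *	... use the aperture, e.g. via mmap(2) on fd ...
 *	ioctl(fd, AGPIOC_RELEASE, 0);
 *	close(fd);
 */
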
/* Implementation of the kernel api */

device_t
agp_find_device(void)
{
	if (!agp_devclass)
		return 0;
	return devclass_get_device(agp_devclass, 0);
}

enum agp_acquire_state
agp_state(device_t dev)
{
	struct agp_softc *sc = device_get_softc(dev);
	return sc->as_state;
}

void
agp_get_info(device_t dev, struct agp_info *info)
{
	struct agp_softc *sc = device_get_softc(dev);

	info->ai_mode =
		pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
	info->ai_aperture_base = rman_get_start(sc->as_aperture);
	info->ai_aperture_size = rman_get_size(sc->as_aperture);
	info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
	info->ai_memory_allowed = sc->as_maxmem;
	info->ai_memory_used = sc->as_allocated;
}

int
agp_acquire(device_t dev)
{
	return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_release(device_t dev)
{
	return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
}

int
agp_enable(device_t dev, u_int32_t mode)
{
	return AGP_ENABLE(dev, mode);
}

void *
agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
{
	return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
}

void
agp_free_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	AGP_FREE_MEMORY(dev, mem);
}

int
agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_BIND_MEMORY(dev, mem, offset);
}

int
agp_unbind_memory(device_t dev, void *handle)
{
	struct agp_memory *mem = (struct agp_memory *) handle;
	return AGP_UNBIND_MEMORY(dev, mem);
}

void
agp_memory_info(device_t dev, void *handle, struct agp_memory_info *mi)
{
	struct agp_memory *mem = (struct agp_memory *) handle;

	mi->ami_size = mem->am_size;
	mi->ami_physical = mem->am_physical;
	mi->ami_offset = mem->am_offset;
	mi->ami_is_bound = mem->am_is_bound;
}
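
/*
 * A kernel consumer (e.g. a DRM driver) would typically drive the api
 * above roughly as in this illustrative sketch:
 *
 *	device_t agpdev = agp_find_device();
 *	struct agp_info info;
 *	void *handle;
 *
 *	if (agpdev == NULL || agp_acquire(agpdev) != 0)
 *		return ENXIO;
 *	agp_get_info(agpdev, &info);
 *	agp_enable(agpdev, info.ai_mode);
 *	handle = agp_alloc_memory(agpdev, 0, 16 * AGP_PAGE_SIZE);
 *	if (handle != NULL)
 *		agp_bind_memory(agpdev, handle, 0);
 *	...
 *	agp_unbind_memory(agpdev, handle);
 *	agp_free_memory(agpdev, handle);
 *	agp_release(agpdev);
 */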