FreeBSD/Linux Kernel Cross Reference
sys/pci/agp.c
1 /*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/5.2/sys/pci/agp.c 122513 2003-11-11 21:49:18Z anholt $");
29
30 #include "opt_bus.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/malloc.h>
35 #include <sys/kernel.h>
36 #include <sys/bus.h>
37 #include <sys/conf.h>
38 #include <sys/ioccom.h>
39 #include <sys/agpio.h>
40 #include <sys/lock.h>
41 #include <sys/lockmgr.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcireg.h>
47 #include <pci/agppriv.h>
48 #include <pci/agpvar.h>
49 #include <pci/agpreg.h>
50
51 #include <vm/vm.h>
52 #include <vm/vm_object.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_pageout.h>
55 #include <vm/pmap.h>
56
57 #include <machine/md_var.h>
58 #include <machine/bus.h>
59 #include <machine/resource.h>
60 #include <sys/rman.h>
61
62 MODULE_VERSION(agp, 1);
63
64 MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");
65
66 #define CDEV_MAJOR 148
67 /* agp_drv.c */
68 static d_open_t agp_open;
69 static d_close_t agp_close;
70 static d_ioctl_t agp_ioctl;
71 static d_mmap_t agp_mmap;
72
73 static struct cdevsw agp_cdevsw = {
74 .d_open = agp_open,
75 .d_close = agp_close,
76 .d_ioctl = agp_ioctl,
77 .d_mmap = agp_mmap,
78 .d_name = "agp",
79 .d_maj = CDEV_MAJOR,
80 .d_flags = D_TTY,
81 };
82
83 static devclass_t agp_devclass;
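/* Map a /dev/agpgart minor number to the agp device with the same unit. */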
84 #define KDEV2DEV(kdev) devclass_get_device(agp_devclass, minor(kdev))
85
86 /* Helper functions for implementing chipset mini drivers. */
87
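/*
 * Make CPU caches coherent with memory so the chipset sees GATT entries
 * and page contents written by the CPU.
 */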
88 void
89 agp_flush_cache()
90 {
91 #ifdef __i386__
92 wbinvd();
93 #endif
94 #ifdef __alpha__
95 /* FIXME: This is most likely not correct as it doesn't flush CPU
96 * write caches, but we don't have a facility to do that and
97 * this is all linux does, too */
98 alpha_mb();
99 #endif
100 }
101
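/*
 * Return the PCI config-space offset of the device's AGP capability
 * (capability ID 2), or 0 if the device has no such capability.
 */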
102 u_int8_t
103 agp_find_caps(device_t dev)
104 {
105 u_int32_t status;
106 u_int8_t ptr, next;
107
108 /*
109 * Check the CAP_LIST bit of the PCI status register first.
110 */
111 status = pci_read_config(dev, PCIR_STATUS, 2);
112 if (!(status & 0x10))
113 return 0;
114
115 /*
116 * Traverse the capabilities list.
117 */
118 for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
119 ptr != 0;
120 ptr = next) {
121 u_int32_t capid = pci_read_config(dev, ptr, 4);
122 next = AGP_CAPID_GET_NEXT_PTR(capid);
123
124 /*
125 * If this capability entry ID is 2, then we are done.
126 */
127 if (AGP_CAPID_GET_CAP_ID(capid) == 2)
128 return ptr;
129 }
130
131 return 0;
132 }
133
134 /*
135 * Find an AGP display device (if any).
136 */
137 static device_t
138 agp_find_display(void)
139 {
140 devclass_t pci = devclass_find("pci");
141 device_t bus, dev = 0;
142 device_t *kids;
143 int busnum, numkids, i;
144
145 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
146 bus = devclass_get_device(pci, busnum);
147 if (!bus)
148 continue;
149 device_get_children(bus, &kids, &numkids);
150 for (i = 0; i < numkids; i++) {
151 dev = kids[i];
152 if (pci_get_class(dev) == PCIC_DISPLAY
153 && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
154 if (agp_find_caps(dev)) {
155 free(kids, M_TEMP);
156 return dev;
157 }
158
159 }
160 free(kids, M_TEMP);
161 }
162
163 return 0;
164 }
165
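/*
 * Allocate a GATT with one 32-bit entry per AGP page of aperture. The
 * table must be physically contiguous; ag_physical is the address a
 * chipset mini driver programs into the bridge.
 */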
166 struct agp_gatt *
167 agp_alloc_gatt(device_t dev)
168 {
169 u_int32_t apsize = AGP_GET_APERTURE(dev);
170 u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
171 struct agp_gatt *gatt;
172
173 if (bootverbose)
174 device_printf(dev,
175 "allocating GATT for aperture of size %dM\n",
176 apsize / (1024*1024));
177
178 if (entries == 0) {
179 device_printf(dev, "bad aperture size\n");
180 return NULL;
181 }
182
183 gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
184 if (!gatt)
185 return 0;
186
187 gatt->ag_entries = entries;
188 gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
189 0, ~0, PAGE_SIZE, 0);
190 if (!gatt->ag_virtual) {
191 if (bootverbose)
192 device_printf(dev, "contiguous allocation failed\n");
193 free(gatt, M_AGP);
194 return 0;
195 }
196 bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
197 gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
198 agp_flush_cache();
199
200 return gatt;
201 }
202
203 void
204 agp_free_gatt(struct agp_gatt *gatt)
205 {
206 contigfree(gatt->ag_virtual,
207 gatt->ag_entries * sizeof(u_int32_t), M_AGP);
208 free(gatt, M_AGP);
209 }
210
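/*
 * Rows are { installed system memory in MB, maximum AGP memory in MB };
 * agp_generic_attach() picks the first row that covers the machine's RAM.
 */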
211 static int agp_max[][2] = {
212 {0, 0},
213 {32, 4},
214 {64, 28},
215 {128, 96},
216 {256, 204},
217 {512, 440},
218 {1024, 942},
219 {2048, 1920},
220 {4096, 3932}
221 };
222 #define agp_max_size (sizeof(agp_max) / sizeof(agp_max[0]))
223
224 int
225 agp_generic_attach(device_t dev)
226 {
227 struct agp_softc *sc = device_get_softc(dev);
228 int rid, memsize, i;
229
230 /*
231 * Find and map the aperture.
232 */
233 rid = AGP_APBASE;
234 sc->as_aperture = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
235 0, ~0, 1, RF_ACTIVE);
236 if (!sc->as_aperture)
237 return ENOMEM;
238
239 /*
240 * Work out an upper bound for agp memory allocation. This
241 * uses a heuristic table from the Linux driver.
242 */
243 memsize = ptoa(Maxmem) >> 20;
244 for (i = 0; i < agp_max_size; i++) {
245 if (memsize <= agp_max[i][0])
246 break;
247 }
248 if (i == agp_max_size) i = agp_max_size - 1;
249 sc->as_maxmem = agp_max[i][1] << 20U;
250
251 /*
252 * The lock is used to prevent re-entry to
253 * agp_generic_bind_memory() since that function can sleep.
254 */
255 lockinit(&sc->as_lock, PZERO|PCATCH, "agplk", 0, 0);
256
257 /*
258 * Initialise stuff for the userland device.
259 */
260 agp_devclass = devclass_find("agp");
261 TAILQ_INIT(&sc->as_memory);
262 sc->as_nextid = 1;
263
264 sc->as_devnode = make_dev(&agp_cdevsw,
265 device_get_unit(dev),
266 UID_ROOT,
267 GID_WHEEL,
268 0600,
269 "agpgart");
270
271 return 0;
272 }
273
274 int
275 agp_generic_detach(device_t dev)
276 {
277 struct agp_softc *sc = device_get_softc(dev);
278 bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
279 lockmgr(&sc->as_lock, LK_DRAIN, 0, curthread);
280 lockdestroy(&sc->as_lock);
281 destroy_dev(sc->as_devnode);
282 agp_flush_cache();
283 return 0;
284 }
285
286 /*
287 * This does the enable logic for v3, with the same topology
288 * restrictions as in place for v2 -- one bus, one device on the bus.
289 */
290 static int
291 agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
292 {
293 u_int32_t tstatus, mstatus;
294 u_int32_t command;
295 int rq, sba, fw, rate, arqsz, cal;
296
297 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
298 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
299
300 /* Set RQ to the min of mode, tstatus and mstatus */
301 rq = AGP_MODE_GET_RQ(mode);
302 if (AGP_MODE_GET_RQ(tstatus) < rq)
303 rq = AGP_MODE_GET_RQ(tstatus);
304 if (AGP_MODE_GET_RQ(mstatus) < rq)
305 rq = AGP_MODE_GET_RQ(mstatus);
306
307 /*
308 * ARQSZ - Set the value to the maximum one.
309 * Don't allow the mode register to override values.
310 */
311 arqsz = AGP_MODE_GET_ARQSZ(mode);
312 if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
313 arqsz = AGP_MODE_GET_ARQSZ(tstatus);
314 if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
315 arqsz = AGP_MODE_GET_ARQSZ(mstatus);
316
317 /* Calibration cycle - don't allow override by mode register */
318 cal = AGP_MODE_GET_CAL(tstatus);
319 if (AGP_MODE_GET_CAL(mstatus) < cal)
320 cal = AGP_MODE_GET_CAL(mstatus);
321
322 /* SBA must be supported for AGP v3. */
323 sba = 1;
324
325 /* Set FW if all three support it. */
326 fw = (AGP_MODE_GET_FW(tstatus)
327 & AGP_MODE_GET_FW(mstatus)
328 & AGP_MODE_GET_FW(mode));
329
330 /* Figure out the max rate */
331 rate = (AGP_MODE_GET_RATE(tstatus)
332 & AGP_MODE_GET_RATE(mstatus)
333 & AGP_MODE_GET_RATE(mode));
334 if (rate & AGP_MODE_V3_RATE_8x)
335 rate = AGP_MODE_V3_RATE_8x;
336 else
337 rate = AGP_MODE_V3_RATE_4x;
338 if (bootverbose)
339 device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);
340
341 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);
342
343 /* Construct the new mode word and tell the hardware */
344 command = AGP_MODE_SET_RQ(0, rq);
345 command = AGP_MODE_SET_ARQSZ(command, arqsz);
346 command = AGP_MODE_SET_CAL(command, cal);
347 command = AGP_MODE_SET_SBA(command, sba);
348 command = AGP_MODE_SET_FW(command, fw);
349 command = AGP_MODE_SET_RATE(command, rate);
350 command = AGP_MODE_SET_AGP(command, 1);
351 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
352 pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
353
354 return 0;
355 }
356
357 static int
358 agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
359 {
360 u_int32_t tstatus, mstatus;
361 u_int32_t command;
362 int rq, sba, fw, rate;
363
364 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
365 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
366
367 /* Set RQ to the min of mode, tstatus and mstatus */
368 rq = AGP_MODE_GET_RQ(mode);
369 if (AGP_MODE_GET_RQ(tstatus) < rq)
370 rq = AGP_MODE_GET_RQ(tstatus);
371 if (AGP_MODE_GET_RQ(mstatus) < rq)
372 rq = AGP_MODE_GET_RQ(mstatus);
373
374 /* Set SBA if all three can deal with SBA */
375 sba = (AGP_MODE_GET_SBA(tstatus)
376 & AGP_MODE_GET_SBA(mstatus)
377 & AGP_MODE_GET_SBA(mode));
378
379 /* Similar for FW */
380 fw = (AGP_MODE_GET_FW(tstatus)
381 & AGP_MODE_GET_FW(mstatus)
382 & AGP_MODE_GET_FW(mode));
383
384 /* Figure out the max rate */
385 rate = (AGP_MODE_GET_RATE(tstatus)
386 & AGP_MODE_GET_RATE(mstatus)
387 & AGP_MODE_GET_RATE(mode));
388 if (rate & AGP_MODE_V2_RATE_4x)
389 rate = AGP_MODE_V2_RATE_4x;
390 else if (rate & AGP_MODE_V2_RATE_2x)
391 rate = AGP_MODE_V2_RATE_2x;
392 else
393 rate = AGP_MODE_V2_RATE_1x;
394 if (bootverbose)
395 device_printf(dev, "Setting AGP v2 mode %d\n", rate);
396
397 /* Construct the new mode word and tell the hardware */
398 command = AGP_MODE_SET_RQ(0, rq);
399 command = AGP_MODE_SET_SBA(command, sba);
400 command = AGP_MODE_SET_FW(command, fw);
401 command = AGP_MODE_SET_RATE(command, rate);
402 command = AGP_MODE_SET_AGP(command, 1);
403 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
404 pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
405
406 return 0;
407 }
408
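/*
 * Negotiate a common AGP mode between the bridge (dev) and the display
 * device, then program both. AGP v3 negotiation is used only when both
 * ends advertise mode 3 in their status registers.
 */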
409 int
410 agp_generic_enable(device_t dev, u_int32_t mode)
411 {
412 device_t mdev = agp_find_display();
413 u_int32_t tstatus, mstatus;
414
415 if (!mdev) {
416 AGP_DPF("can't find display\n");
417 return ENXIO;
418 }
419
420 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
421 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
422
423 /*
424 * Check display and bridge for AGP v3 support. AGP v3 allows
425 * more variety in topology than v2, e.g. multiple AGP devices
426 * attached to one bridge, or multiple AGP bridges in one
427 * system. This doesn't attempt to address those situations,
428 * but should work fine for a classic single AGP slot system
429 * with AGP v3.
430 */
431 if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
432 return (agp_v3_enable(dev, mdev, mode));
433 else
434 return (agp_v2_enable(dev, mdev, mode));
435 }
436
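/*
 * Allocate a block of AGP memory backed by an anonymous VM object. The
 * backing pages are not allocated until the block is bound.
 */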
437 struct agp_memory *
438 agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
439 {
440 struct agp_softc *sc = device_get_softc(dev);
441 struct agp_memory *mem;
442
443 if ((size & (AGP_PAGE_SIZE - 1)) != 0)
444 return 0;
445
446 if (sc->as_allocated + size > sc->as_maxmem)
447 return 0;
448
449 if (type != 0) {
450 printf("agp_generic_alloc_memory: unsupported type %d\n",
451 type);
452 return 0;
453 }
454
455 mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
456 mem->am_id = sc->as_nextid++;
457 mem->am_size = size;
458 mem->am_type = 0;
459 mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
460 mem->am_physical = 0;
461 mem->am_offset = 0;
462 mem->am_is_bound = 0;
463 TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
464 sc->as_allocated += size;
465
466 return mem;
467 }
468
469 int
470 agp_generic_free_memory(device_t dev, struct agp_memory *mem)
471 {
472 struct agp_softc *sc = device_get_softc(dev);
473
474 if (mem->am_is_bound)
475 return EBUSY;
476
477 sc->as_allocated -= mem->am_size;
478 TAILQ_REMOVE(&sc->as_memory, mem, am_link);
479 vm_object_deallocate(mem->am_obj);
480 free(mem, M_AGP);
481 return 0;
482 }
483
484 int
485 agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
486 vm_offset_t offset)
487 {
488 struct agp_softc *sc = device_get_softc(dev);
489 vm_offset_t i, j, k;
490 vm_page_t m;
491 int error;
492
493 lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0, curthread);
494
495 if (mem->am_is_bound) {
496 device_printf(dev, "memory already bound\n");
497 lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
498 return EINVAL;
499 }
500 if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0
501 || offset + mem->am_size > AGP_GET_APERTURE(dev)) {
502 device_printf(dev, "binding memory at bad offset %#x\n",
503 (int) offset);
504 lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
505 return EINVAL;
506 }
507
508 /*
509 * Bind the individual pages and flush the chipset's
510 * TLB.
511 *
512 * XXX Presumably, this needs to be the pci address on alpha
513 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
514 * alpha AGP hardware to check.
515 */
516 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
517 /*
518 * Find a page from the object and wire it
519 * down. This page will be mapped using one or more
520 * entries in the GATT (assuming that PAGE_SIZE >=
521 * AGP_PAGE_SIZE). If this is the first call to bind,
522 * the pages will be allocated and zeroed.
523 */
524 VM_OBJECT_LOCK(mem->am_obj);
525 m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
526 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
527 VM_OBJECT_UNLOCK(mem->am_obj);
528 if ((m->flags & PG_ZERO) == 0)
529 pmap_zero_page(m);
530 AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
531
532 /*
533 * Install entries in the GATT, making sure that if
534 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
535 * aligned to PAGE_SIZE, we don't modify too many GATT
536 * entries.
537 */
538 for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
539 j += AGP_PAGE_SIZE) {
540 vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
541 AGP_DPF("binding offset %#x to pa %#x\n",
542 offset + i + j, pa);
543 error = AGP_BIND_PAGE(dev, offset + i + j, pa);
544 if (error) {
545 /*
546 * Bail out. Reverse all the mappings
547 * and unwire the pages.
548 */
549 vm_page_lock_queues();
550 vm_page_wakeup(m);
551 vm_page_unlock_queues();
552 for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
553 AGP_UNBIND_PAGE(dev, offset + k);
554 VM_OBJECT_LOCK(mem->am_obj);
555 for (k = 0; k <= i; k += PAGE_SIZE) {
556 m = vm_page_lookup(mem->am_obj,
557 OFF_TO_IDX(k));
558 vm_page_lock_queues();
559 vm_page_unwire(m, 0);
560 vm_page_unlock_queues();
561 }
562 VM_OBJECT_UNLOCK(mem->am_obj);
563 lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
564 return error;
565 }
566 }
567 vm_page_lock_queues();
568 vm_page_wakeup(m);
569 vm_page_unlock_queues();
570 }
571
572 /*
573 * Flush the cpu cache since we are providing a new mapping
574 * for these pages.
575 */
576 agp_flush_cache();
577
578 /*
579 * Make sure the chipset gets the new mappings.
580 */
581 AGP_FLUSH_TLB(dev);
582
583 mem->am_offset = offset;
584 mem->am_is_bound = 1;
585
586 lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
587
588 return 0;
589 }
590
591 int
592 agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
593 {
594 struct agp_softc *sc = device_get_softc(dev);
595 vm_page_t m;
596 int i;
597
598 lockmgr(&sc->as_lock, LK_EXCLUSIVE, 0, curthread);
599
600 if (!mem->am_is_bound) {
601 device_printf(dev, "memory is not bound\n");
602 lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
603 return EINVAL;
604 }
605 
606 /*
607 * Unbind the individual pages and flush the chipset's
608 * TLB. Unwire the pages so they can be swapped.
609 */
610 for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
611 AGP_UNBIND_PAGE(dev, mem->am_offset + i);
612 VM_OBJECT_LOCK(mem->am_obj);
613 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
614 m = vm_page_lookup(mem->am_obj, atop(i));
615 vm_page_lock_queues();
616 vm_page_unwire(m, 0);
617 vm_page_unlock_queues();
618 }
619 VM_OBJECT_UNLOCK(mem->am_obj);
620
621 agp_flush_cache();
622 AGP_FLUSH_TLB(dev);
623
624 mem->am_offset = 0;
625 mem->am_is_bound = 0;
626
627 lockmgr(&sc->as_lock, LK_RELEASE, 0, curthread);
628
629 return 0;
630 }
631
632 /* Helper functions for implementing user/kernel api */
633
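/*
 * The acquire state arbitrates ownership of the AGP hardware between the
 * kernel API and the userland /dev/agpgart interface; only one of them
 * may hold it at a time.
 */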
634 static int
635 agp_acquire_helper(device_t dev, enum agp_acquire_state state)
636 {
637 struct agp_softc *sc = device_get_softc(dev);
638
639 if (sc->as_state != AGP_ACQUIRE_FREE)
640 return EBUSY;
641 sc->as_state = state;
642
643 return 0;
644 }
645
646 static int
647 agp_release_helper(device_t dev, enum agp_acquire_state state)
648 {
649 struct agp_softc *sc = device_get_softc(dev);
650
651 if (sc->as_state == AGP_ACQUIRE_FREE)
652 return 0;
653
654 if (sc->as_state != state)
655 return EBUSY;
656
657 sc->as_state = AGP_ACQUIRE_FREE;
658 return 0;
659 }
660
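/* Look up an allocated block by the id (ioctl 'key') assigned at allocation. */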
661 static struct agp_memory *
662 agp_find_memory(device_t dev, int id)
663 {
664 struct agp_softc *sc = device_get_softc(dev);
665 struct agp_memory *mem;
666
667 AGP_DPF("searching for memory block %d\n", id);
668 TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
669 AGP_DPF("considering memory block %d\n", mem->am_id);
670 if (mem->am_id == id)
671 return mem;
672 }
673 return 0;
674 }
675
676 /* Implementation of the userland ioctl api */
677
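/*
 * AGPIOC_INFO reply: aper_size is reported in megabytes; pg_total,
 * pg_system and pg_used are counts of AGP pages.
 */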
678 static int
679 agp_info_user(device_t dev, agp_info *info)
680 {
681 struct agp_softc *sc = device_get_softc(dev);
682
683 bzero(info, sizeof *info);
684 info->bridge_id = pci_get_devid(dev);
685 info->agp_mode =
686 pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
687 info->aper_base = rman_get_start(sc->as_aperture);
688 info->aper_size = AGP_GET_APERTURE(dev) >> 20;
689 info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
690 info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;
691
692 return 0;
693 }
694
695 static int
696 agp_setup_user(device_t dev, agp_setup *setup)
697 {
698 return AGP_ENABLE(dev, setup->agp_mode);
699 }
700
701 static int
702 agp_allocate_user(device_t dev, agp_allocate *alloc)
703 {
704 struct agp_memory *mem;
705
706 mem = AGP_ALLOC_MEMORY(dev,
707 alloc->type,
708 alloc->pg_count << AGP_PAGE_SHIFT);
709 if (mem) {
710 alloc->key = mem->am_id;
711 alloc->physical = mem->am_physical;
712 return 0;
713 } else {
714 return ENOMEM;
715 }
716 }
717
718 static int
719 agp_deallocate_user(device_t dev, int id)
720 {
721 struct agp_memory *mem = agp_find_memory(dev, id);
722
723 if (mem) {
724 AGP_FREE_MEMORY(dev, mem);
725 return 0;
726 } else {
727 return ENOENT;
728 }
729 }
730
731 static int
732 agp_bind_user(device_t dev, agp_bind *bind)
733 {
734 struct agp_memory *mem = agp_find_memory(dev, bind->key);
735
736 if (!mem)
737 return ENOENT;
738
739 return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
740 }
741
742 static int
743 agp_unbind_user(device_t dev, agp_unbind *unbind)
744 {
745 struct agp_memory *mem = agp_find_memory(dev, unbind->key);
746
747 if (!mem)
748 return ENOENT;
749
750 return AGP_UNBIND_MEMORY(dev, mem);
751 }
752
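/* Keep the device busy while /dev/agpgart is open so it cannot be detached. */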
753 static int
754 agp_open(dev_t kdev, int oflags, int devtype, struct thread *td)
755 {
756 device_t dev = KDEV2DEV(kdev);
757 struct agp_softc *sc = device_get_softc(dev);
758
759 if (!sc->as_isopen) {
760 sc->as_isopen = 1;
761 device_busy(dev);
762 }
763
764 return 0;
765 }
766
767 static int
768 agp_close(dev_t kdev, int fflag, int devtype, struct thread *td)
769 {
770 device_t dev = KDEV2DEV(kdev);
771 struct agp_softc *sc = device_get_softc(dev);
772 struct agp_memory *mem;
773
774 /*
775 * Clear the GATT and force release on last close
776 */
777 while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
778 if (mem->am_is_bound)
779 AGP_UNBIND_MEMORY(dev, mem);
780 AGP_FREE_MEMORY(dev, mem);
781 }
782 if (sc->as_state == AGP_ACQUIRE_USER)
783 agp_release_helper(dev, AGP_ACQUIRE_USER);
784 sc->as_isopen = 0;
785 device_unbusy(dev);
786
787 return 0;
788 }
789
790 static int
791 agp_ioctl(dev_t kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
792 {
793 device_t dev = KDEV2DEV(kdev);
794
795 switch (cmd) {
796 case AGPIOC_INFO:
797 return agp_info_user(dev, (agp_info *) data);
798
799 case AGPIOC_ACQUIRE:
800 return agp_acquire_helper(dev, AGP_ACQUIRE_USER);
801
802 case AGPIOC_RELEASE:
803 return agp_release_helper(dev, AGP_ACQUIRE_USER);
804
805 case AGPIOC_SETUP:
806 return agp_setup_user(dev, (agp_setup *)data);
807
808 case AGPIOC_ALLOCATE:
809 return agp_allocate_user(dev, (agp_allocate *)data);
810
811 case AGPIOC_DEALLOCATE:
812 return agp_deallocate_user(dev, *(int *) data);
813
814 case AGPIOC_BIND:
815 return agp_bind_user(dev, (agp_bind *)data);
816
817 case AGPIOC_UNBIND:
818 return agp_unbind_user(dev, (agp_unbind *)data);
819
820 }
821
822 return EINVAL;
823 }
824
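/*
 * Translate a byte offset into /dev/agpgart into the physical address of
 * the corresponding aperture page.
 */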
825 static int
826 agp_mmap(dev_t kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
827 {
828 device_t dev = KDEV2DEV(kdev);
829 struct agp_softc *sc = device_get_softc(dev);
830
831 if (offset > AGP_GET_APERTURE(dev))
832 return -1;
833 *paddr = rman_get_start(sc->as_aperture) + offset;
834 return 0;
835 }
836
837 /* Implementation of the kernel api */
838
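/*
 * A typical kernel consumer would use this API roughly as follows (a
 * sketch only; mode, size and offset are supplied by the caller):
 *
 *	device_t agpdev = agp_find_device();
 *	if (agpdev != NULL && agp_acquire(agpdev) == 0) {
 *		agp_enable(agpdev, mode);
 *		void *handle = agp_alloc_memory(agpdev, 0, size);
 *		agp_bind_memory(agpdev, handle, offset);
 *		...
 *		agp_release(agpdev);
 *	}
 */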
839 device_t
840 agp_find_device()
841 {
842 if (!agp_devclass)
843 return 0;
844 return devclass_get_device(agp_devclass, 0);
845 }
846
847 enum agp_acquire_state
848 agp_state(device_t dev)
849 {
850 struct agp_softc *sc = device_get_softc(dev);
851 return sc->as_state;
852 }
853
854 void
855 agp_get_info(device_t dev, struct agp_info *info)
856 {
857 struct agp_softc *sc = device_get_softc(dev);
858
859 info->ai_mode =
860 pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
861 info->ai_aperture_base = rman_get_start(sc->as_aperture);
862 info->ai_aperture_size = rman_get_size(sc->as_aperture);
863 info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
864 info->ai_memory_allowed = sc->as_maxmem;
865 info->ai_memory_used = sc->as_allocated;
866 }
867
868 int
869 agp_acquire(device_t dev)
870 {
871 return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
872 }
873
874 int
875 agp_release(device_t dev)
876 {
877 return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
878 }
879
880 int
881 agp_enable(device_t dev, u_int32_t mode)
882 {
883 return AGP_ENABLE(dev, mode);
884 }
885
886 void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
887 {
888 return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
889 }
890
891 void agp_free_memory(device_t dev, void *handle)
892 {
893 struct agp_memory *mem = (struct agp_memory *) handle;
894 AGP_FREE_MEMORY(dev, mem);
895 }
896
897 int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
898 {
899 struct agp_memory *mem = (struct agp_memory *) handle;
900 return AGP_BIND_MEMORY(dev, mem, offset);
901 }
902
903 int agp_unbind_memory(device_t dev, void *handle)
904 {
905 struct agp_memory *mem = (struct agp_memory *) handle;
906 return AGP_UNBIND_MEMORY(dev, mem);
907 }
908
909 void agp_memory_info(device_t dev, void *handle, struct
910 agp_memory_info *mi)
911 {
912 struct agp_memory *mem = (struct agp_memory *) handle;
913
914 mi->ami_size = mem->am_size;
915 mi->ami_physical = mem->am_physical;
916 mi->ami_offset = mem->am_offset;
917 mi->ami_is_bound = mem->am_is_bound;
918 }