FreeBSD/Linux Kernel Cross Reference
sys/pci/agp.c
1 /*-
2 * Copyright (c) 2000 Doug Rabson
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/5.3/sys/pci/agp.c 133852 2004-08-16 12:25:48Z obrien $");
29
30 #include "opt_bus.h"
31
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/malloc.h>
35 #include <sys/kernel.h>
36 #include <sys/module.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/ioccom.h>
40 #include <sys/agpio.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44
45 #include <dev/pci/pcivar.h>
46 #include <dev/pci/pcireg.h>
47 #include <pci/agppriv.h>
48 #include <pci/agpvar.h>
49 #include <pci/agpreg.h>
50
51 #include <vm/vm.h>
52 #include <vm/vm_object.h>
53 #include <vm/vm_page.h>
54 #include <vm/vm_pageout.h>
55 #include <vm/pmap.h>
56
57 #include <machine/md_var.h>
58 #include <machine/bus.h>
59 #include <machine/resource.h>
60 #include <sys/rman.h>
61
62 MODULE_VERSION(agp, 1);
63
64 MALLOC_DEFINE(M_AGP, "agp", "AGP data structures");
65
66 /* agp_drv.c */
67 static d_open_t agp_open;
68 static d_close_t agp_close;
69 static d_ioctl_t agp_ioctl;
70 static d_mmap_t agp_mmap;
71
72 static struct cdevsw agp_cdevsw = {
73 .d_version = D_VERSION,
74 .d_flags = D_NEEDGIANT,
75 .d_open = agp_open,
76 .d_close = agp_close,
77 .d_ioctl = agp_ioctl,
78 .d_mmap = agp_mmap,
79 .d_name = "agp",
80 };
81
82 static devclass_t agp_devclass;
83 #define KDEV2DEV(kdev) devclass_get_device(agp_devclass, minor(kdev))
84
85 /* Helper functions for implementing chipset mini drivers. */
86
87 void
88 agp_flush_cache()
89 {
90 #if defined(__i386__) || defined(__amd64__)
91 wbinvd();
92 #endif
93 #ifdef __alpha__
94 /* FIXME: This is most likely not correct as it doesn't flush CPU
95 * write caches, but we don't have a facility to do that and
96 * this is all linux does, too */
97 alpha_mb();
98 #endif
99 }
100
101 u_int8_t
102 agp_find_caps(device_t dev)
103 {
104 u_int32_t status;
105 u_int8_t ptr, next;
106
107 /*
108 * Check the CAP_LIST bit of the PCI status register first.
109 */
110 status = pci_read_config(dev, PCIR_STATUS, 2);
111 if (!(status & 0x10))
112 return 0;
113
114 /*
115 * Traverse the capabilities list.
116 */
117 for (ptr = pci_read_config(dev, AGP_CAPPTR, 1);
118 ptr != 0;
119 ptr = next) {
120 u_int32_t capid = pci_read_config(dev, ptr, 4);
121 next = AGP_CAPID_GET_NEXT_PTR(capid);
122
123 /*
124 * If this capability entry ID is 2, then we are done.
125 */
126 if (AGP_CAPID_GET_CAP_ID(capid) == 2)
127 return ptr;
128 }
129
130 return 0;
131 }
132
133 /*
134 * Find an AGP display device (if any).
135 */
136 static device_t
137 agp_find_display(void)
138 {
139 devclass_t pci = devclass_find("pci");
140 device_t bus, dev = 0;
141 device_t *kids;
142 int busnum, numkids, i;
143
144 for (busnum = 0; busnum < devclass_get_maxunit(pci); busnum++) {
145 bus = devclass_get_device(pci, busnum);
146 if (!bus)
147 continue;
148 device_get_children(bus, &kids, &numkids);
149 for (i = 0; i < numkids; i++) {
150 dev = kids[i];
151 if (pci_get_class(dev) == PCIC_DISPLAY
152 && pci_get_subclass(dev) == PCIS_DISPLAY_VGA)
153 if (agp_find_caps(dev)) {
154 free(kids, M_TEMP);
155 return dev;
156 }
157
158 }
159 free(kids, M_TEMP);
160 }
161
162 return 0;
163 }
164
165 struct agp_gatt *
166 agp_alloc_gatt(device_t dev)
167 {
168 u_int32_t apsize = AGP_GET_APERTURE(dev);
169 u_int32_t entries = apsize >> AGP_PAGE_SHIFT;
170 struct agp_gatt *gatt;
171
172 if (bootverbose)
173 device_printf(dev,
174 "allocating GATT for aperture of size %dM\n",
175 apsize / (1024*1024));
176
177 if (entries == 0) {
178 device_printf(dev, "bad aperture size\n");
179 return NULL;
180 }
181
182 gatt = malloc(sizeof(struct agp_gatt), M_AGP, M_NOWAIT);
183 if (!gatt)
184 return 0;
185
186 gatt->ag_entries = entries;
187 gatt->ag_virtual = contigmalloc(entries * sizeof(u_int32_t), M_AGP, 0,
188 0, ~0, PAGE_SIZE, 0);
189 if (!gatt->ag_virtual) {
190 if (bootverbose)
191 device_printf(dev, "contiguous allocation failed\n");
192 free(gatt, M_AGP);
193 return 0;
194 }
195 bzero(gatt->ag_virtual, entries * sizeof(u_int32_t));
196 gatt->ag_physical = vtophys((vm_offset_t) gatt->ag_virtual);
197 agp_flush_cache();
198
199 return gatt;
200 }
201
202 void
203 agp_free_gatt(struct agp_gatt *gatt)
204 {
205 contigfree(gatt->ag_virtual,
206 gatt->ag_entries * sizeof(u_int32_t), M_AGP);
207 free(gatt, M_AGP);
208 }
209
210 static int agp_max[][2] = {
211 {0, 0},
212 {32, 4},
213 {64, 28},
214 {128, 96},
215 {256, 204},
216 {512, 440},
217 {1024, 942},
218 {2048, 1920},
219 {4096, 3932}
220 };
221 #define agp_max_size (sizeof(agp_max) / sizeof(agp_max[0]))
222
223 int
224 agp_generic_attach(device_t dev)
225 {
226 struct agp_softc *sc = device_get_softc(dev);
227 int rid, memsize, i;
228
229 /*
230 * Find and map the aperture.
231 */
232 rid = AGP_APBASE;
233 sc->as_aperture = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
234 RF_ACTIVE);
235 if (!sc->as_aperture)
236 return ENOMEM;
237
238 /*
239 * Work out an upper bound for agp memory allocation. This
240  * uses a heuristic table from the Linux driver.
241 */
242 memsize = ptoa(Maxmem) >> 20;
243 for (i = 0; i < agp_max_size; i++) {
244 if (memsize <= agp_max[i][0])
245 break;
246 }
247 if (i == agp_max_size) i = agp_max_size - 1;
248 sc->as_maxmem = agp_max[i][1] << 20U;
249
250 /*
251 * The lock is used to prevent re-entry to
252 * agp_generic_bind_memory() since that function can sleep.
253 */
254 mtx_init(&sc->as_lock, "agp lock", NULL, MTX_DEF);
255
256 /*
257 * Initialise stuff for the userland device.
258 */
259 agp_devclass = devclass_find("agp");
260 TAILQ_INIT(&sc->as_memory);
261 sc->as_nextid = 1;
262
263 sc->as_devnode = make_dev(&agp_cdevsw,
264 device_get_unit(dev),
265 UID_ROOT,
266 GID_WHEEL,
267 0600,
268 "agpgart");
269
270 return 0;
271 }
272
273 int
274 agp_generic_detach(device_t dev)
275 {
276 struct agp_softc *sc = device_get_softc(dev);
277 bus_release_resource(dev, SYS_RES_MEMORY, AGP_APBASE, sc->as_aperture);
278 mtx_destroy(&sc->as_lock);
279 destroy_dev(sc->as_devnode);
280 agp_flush_cache();
281 return 0;
282 }
283
284 /*
285 * This does the enable logic for v3, with the same topology
286 * restrictions as in place for v2 -- one bus, one device on the bus.
287 */
288 static int
289 agp_v3_enable(device_t dev, device_t mdev, u_int32_t mode)
290 {
291 u_int32_t tstatus, mstatus;
292 u_int32_t command;
293 int rq, sba, fw, rate, arqsz, cal;
294
295 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
296 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
297
298 /* Set RQ to the min of mode, tstatus and mstatus */
299 rq = AGP_MODE_GET_RQ(mode);
300 if (AGP_MODE_GET_RQ(tstatus) < rq)
301 rq = AGP_MODE_GET_RQ(tstatus);
302 if (AGP_MODE_GET_RQ(mstatus) < rq)
303 rq = AGP_MODE_GET_RQ(mstatus);
304
305 /*
306 * ARQSZ - Set the value to the maximum one.
307 * Don't allow the mode register to override values.
308 */
309 arqsz = AGP_MODE_GET_ARQSZ(mode);
310 	if (AGP_MODE_GET_ARQSZ(tstatus) > arqsz)
311 		arqsz = AGP_MODE_GET_ARQSZ(tstatus);
312 	if (AGP_MODE_GET_ARQSZ(mstatus) > arqsz)
313 		arqsz = AGP_MODE_GET_ARQSZ(mstatus);
314
315 /* Calibration cycle - don't allow override by mode register */
316 cal = AGP_MODE_GET_CAL(tstatus);
317 if (AGP_MODE_GET_CAL(mstatus) < cal)
318 cal = AGP_MODE_GET_CAL(mstatus);
319
320 /* SBA must be supported for AGP v3. */
321 sba = 1;
322
323 /* Set FW if all three support it. */
324 fw = (AGP_MODE_GET_FW(tstatus)
325 & AGP_MODE_GET_FW(mstatus)
326 & AGP_MODE_GET_FW(mode));
327
328 /* Figure out the max rate */
329 rate = (AGP_MODE_GET_RATE(tstatus)
330 & AGP_MODE_GET_RATE(mstatus)
331 & AGP_MODE_GET_RATE(mode));
332 if (rate & AGP_MODE_V3_RATE_8x)
333 rate = AGP_MODE_V3_RATE_8x;
334 else
335 rate = AGP_MODE_V3_RATE_4x;
336 if (bootverbose)
337 device_printf(dev, "Setting AGP v3 mode %d\n", rate * 4);
338
339 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, 0, 4);
340
341 /* Construct the new mode word and tell the hardware */
342 command = AGP_MODE_SET_RQ(0, rq);
343 command = AGP_MODE_SET_ARQSZ(command, arqsz);
344 command = AGP_MODE_SET_CAL(command, cal);
345 command = AGP_MODE_SET_SBA(command, sba);
346 command = AGP_MODE_SET_FW(command, fw);
347 command = AGP_MODE_SET_RATE(command, rate);
348 command = AGP_MODE_SET_AGP(command, 1);
349 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
350 pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
351
352 return 0;
353 }
354
355 static int
356 agp_v2_enable(device_t dev, device_t mdev, u_int32_t mode)
357 {
358 u_int32_t tstatus, mstatus;
359 u_int32_t command;
360 int rq, sba, fw, rate;
361
362 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
363 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
364
365 /* Set RQ to the min of mode, tstatus and mstatus */
366 rq = AGP_MODE_GET_RQ(mode);
367 if (AGP_MODE_GET_RQ(tstatus) < rq)
368 rq = AGP_MODE_GET_RQ(tstatus);
369 if (AGP_MODE_GET_RQ(mstatus) < rq)
370 rq = AGP_MODE_GET_RQ(mstatus);
371
372 /* Set SBA if all three can deal with SBA */
373 sba = (AGP_MODE_GET_SBA(tstatus)
374 & AGP_MODE_GET_SBA(mstatus)
375 & AGP_MODE_GET_SBA(mode));
376
377 /* Similar for FW */
378 fw = (AGP_MODE_GET_FW(tstatus)
379 & AGP_MODE_GET_FW(mstatus)
380 & AGP_MODE_GET_FW(mode));
381
382 /* Figure out the max rate */
383 rate = (AGP_MODE_GET_RATE(tstatus)
384 & AGP_MODE_GET_RATE(mstatus)
385 & AGP_MODE_GET_RATE(mode));
386 if (rate & AGP_MODE_V2_RATE_4x)
387 rate = AGP_MODE_V2_RATE_4x;
388 else if (rate & AGP_MODE_V2_RATE_2x)
389 rate = AGP_MODE_V2_RATE_2x;
390 else
391 rate = AGP_MODE_V2_RATE_1x;
392 if (bootverbose)
393 device_printf(dev, "Setting AGP v2 mode %d\n", rate);
394
395 /* Construct the new mode word and tell the hardware */
396 command = AGP_MODE_SET_RQ(0, rq);
397 command = AGP_MODE_SET_SBA(command, sba);
398 command = AGP_MODE_SET_FW(command, fw);
399 command = AGP_MODE_SET_RATE(command, rate);
400 command = AGP_MODE_SET_AGP(command, 1);
401 pci_write_config(dev, agp_find_caps(dev) + AGP_COMMAND, command, 4);
402 pci_write_config(mdev, agp_find_caps(mdev) + AGP_COMMAND, command, 4);
403
404 return 0;
405 }
406
407 int
408 agp_generic_enable(device_t dev, u_int32_t mode)
409 {
410 device_t mdev = agp_find_display();
411 u_int32_t tstatus, mstatus;
412
413 if (!mdev) {
414 AGP_DPF("can't find display\n");
415 return ENXIO;
416 }
417
418 tstatus = pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
419 mstatus = pci_read_config(mdev, agp_find_caps(mdev) + AGP_STATUS, 4);
420
421 /*
422 * Check display and bridge for AGP v3 support. AGP v3 allows
423 * more variety in topology than v2, e.g. multiple AGP devices
424 * attached to one bridge, or multiple AGP bridges in one
425 * system. This doesn't attempt to address those situations,
426 * but should work fine for a classic single AGP slot system
427 * with AGP v3.
428 */
429 if (AGP_MODE_GET_MODE_3(tstatus) && AGP_MODE_GET_MODE_3(mstatus))
430 return (agp_v3_enable(dev, mdev, mode));
431 else
432 return (agp_v2_enable(dev, mdev, mode));
433 }
434
435 struct agp_memory *
436 agp_generic_alloc_memory(device_t dev, int type, vm_size_t size)
437 {
438 struct agp_softc *sc = device_get_softc(dev);
439 struct agp_memory *mem;
440
441 if ((size & (AGP_PAGE_SIZE - 1)) != 0)
442 return 0;
443
444 if (sc->as_allocated + size > sc->as_maxmem)
445 return 0;
446
447 if (type != 0) {
448 printf("agp_generic_alloc_memory: unsupported type %d\n",
449 type);
450 return 0;
451 }
452
453 mem = malloc(sizeof *mem, M_AGP, M_WAITOK);
454 mem->am_id = sc->as_nextid++;
455 mem->am_size = size;
456 mem->am_type = 0;
457 mem->am_obj = vm_object_allocate(OBJT_DEFAULT, atop(round_page(size)));
458 mem->am_physical = 0;
459 mem->am_offset = 0;
460 mem->am_is_bound = 0;
461 TAILQ_INSERT_TAIL(&sc->as_memory, mem, am_link);
462 sc->as_allocated += size;
463
464 return mem;
465 }
466
467 int
468 agp_generic_free_memory(device_t dev, struct agp_memory *mem)
469 {
470 struct agp_softc *sc = device_get_softc(dev);
471
472 if (mem->am_is_bound)
473 return EBUSY;
474
475 sc->as_allocated -= mem->am_size;
476 TAILQ_REMOVE(&sc->as_memory, mem, am_link);
477 vm_object_deallocate(mem->am_obj);
478 free(mem, M_AGP);
479 return 0;
480 }
481
482 int
483 agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
484 vm_offset_t offset)
485 {
486 struct agp_softc *sc = device_get_softc(dev);
487 vm_offset_t i, j, k;
488 vm_page_t m;
489 int error;
490
491 /* Do some sanity checks first. */
492 if (offset < 0 || (offset & (AGP_PAGE_SIZE - 1)) != 0 ||
493 offset + mem->am_size > AGP_GET_APERTURE(dev)) {
494 device_printf(dev, "binding memory at bad offset %#x\n",
495 (int)offset);
496 return EINVAL;
497 }
498
499 /*
500 * Allocate the pages early, before acquiring the lock,
501 * because vm_page_grab() used with VM_ALLOC_RETRY may
502 * block and we can't hold a mutex while blocking.
503 */
504 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
505 /*
506 * Find a page from the object and wire it
507 * down. This page will be mapped using one or more
508 * entries in the GATT (assuming that PAGE_SIZE >=
509 		 * AGP_PAGE_SIZE). If this is the first call to bind,
510 * the pages will be allocated and zeroed.
511 */
512 VM_OBJECT_LOCK(mem->am_obj);
513 m = vm_page_grab(mem->am_obj, OFF_TO_IDX(i),
514 VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
515 VM_OBJECT_UNLOCK(mem->am_obj);
516 AGP_DPF("found page pa=%#x\n", VM_PAGE_TO_PHYS(m));
517 }
518
519 mtx_lock(&sc->as_lock);
520
521 if (mem->am_is_bound) {
522 device_printf(dev, "memory already bound\n");
523 error = EINVAL;
524 goto bad;
525 }
526
527 /*
528 * Bind the individual pages and flush the chipset's
529 * TLB.
530 *
531 * XXX Presumably, this needs to be the pci address on alpha
532 * (i.e. use alpha_XXX_dmamap()). I don't have access to any
533 * alpha AGP hardware to check.
534 */
535 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
536 VM_OBJECT_LOCK(mem->am_obj);
537 m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
538 VM_OBJECT_UNLOCK(mem->am_obj);
539
540 /*
541 * Install entries in the GATT, making sure that if
542 * AGP_PAGE_SIZE < PAGE_SIZE and mem->am_size is not
543 * aligned to PAGE_SIZE, we don't modify too many GATT
544 * entries.
545 */
546 for (j = 0; j < PAGE_SIZE && i + j < mem->am_size;
547 j += AGP_PAGE_SIZE) {
548 vm_offset_t pa = VM_PAGE_TO_PHYS(m) + j;
549 AGP_DPF("binding offset %#x to pa %#x\n",
550 offset + i + j, pa);
551 error = AGP_BIND_PAGE(dev, offset + i + j, pa);
552 if (error) {
553 /*
554 * Bail out. Reverse all the mappings
555 * and unwire the pages.
556 */
557 vm_page_lock_queues();
558 vm_page_wakeup(m);
559 vm_page_unlock_queues();
560 for (k = 0; k < i + j; k += AGP_PAGE_SIZE)
561 AGP_UNBIND_PAGE(dev, offset + k);
562 goto bad;
563 }
564 }
565 vm_page_lock_queues();
566 vm_page_wakeup(m);
567 vm_page_unlock_queues();
568 }
569
570 /*
571 * Flush the cpu cache since we are providing a new mapping
572 * for these pages.
573 */
574 agp_flush_cache();
575
576 /*
577 * Make sure the chipset gets the new mappings.
578 */
579 AGP_FLUSH_TLB(dev);
580
581 mem->am_offset = offset;
582 mem->am_is_bound = 1;
583
584 mtx_unlock(&sc->as_lock);
585
586 return 0;
587 bad:
588 mtx_unlock(&sc->as_lock);
589 VM_OBJECT_LOCK(mem->am_obj);
590 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
591 m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
592 vm_page_lock_queues();
593 vm_page_unwire(m, 0);
594 vm_page_unlock_queues();
595 }
596 VM_OBJECT_UNLOCK(mem->am_obj);
597
598 return error;
599 }
600
601 int
602 agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
603 {
604 struct agp_softc *sc = device_get_softc(dev);
605 vm_page_t m;
606 int i;
607
608 mtx_lock(&sc->as_lock);
609
610 if (!mem->am_is_bound) {
611 device_printf(dev, "memory is not bound\n");
612 mtx_unlock(&sc->as_lock);
613 return EINVAL;
614 }
615
616
617 /*
618 * Unbind the individual pages and flush the chipset's
619 * TLB. Unwire the pages so they can be swapped.
620 */
621 for (i = 0; i < mem->am_size; i += AGP_PAGE_SIZE)
622 AGP_UNBIND_PAGE(dev, mem->am_offset + i);
623 VM_OBJECT_LOCK(mem->am_obj);
624 for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
625 m = vm_page_lookup(mem->am_obj, atop(i));
626 vm_page_lock_queues();
627 vm_page_unwire(m, 0);
628 vm_page_unlock_queues();
629 }
630 VM_OBJECT_UNLOCK(mem->am_obj);
631
632 agp_flush_cache();
633 AGP_FLUSH_TLB(dev);
634
635 mem->am_offset = 0;
636 mem->am_is_bound = 0;
637
638 mtx_unlock(&sc->as_lock);
639
640 return 0;
641 }
642
643 /* Helper functions for implementing user/kernel api */
644
645 static int
646 agp_acquire_helper(device_t dev, enum agp_acquire_state state)
647 {
648 struct agp_softc *sc = device_get_softc(dev);
649
650 if (sc->as_state != AGP_ACQUIRE_FREE)
651 return EBUSY;
652 sc->as_state = state;
653
654 return 0;
655 }
656
657 static int
658 agp_release_helper(device_t dev, enum agp_acquire_state state)
659 {
660 struct agp_softc *sc = device_get_softc(dev);
661
662 if (sc->as_state == AGP_ACQUIRE_FREE)
663 return 0;
664
665 if (sc->as_state != state)
666 return EBUSY;
667
668 sc->as_state = AGP_ACQUIRE_FREE;
669 return 0;
670 }
671
672 static struct agp_memory *
673 agp_find_memory(device_t dev, int id)
674 {
675 struct agp_softc *sc = device_get_softc(dev);
676 struct agp_memory *mem;
677
678 AGP_DPF("searching for memory block %d\n", id);
679 TAILQ_FOREACH(mem, &sc->as_memory, am_link) {
680 AGP_DPF("considering memory block %d\n", mem->am_id);
681 if (mem->am_id == id)
682 return mem;
683 }
684 return 0;
685 }
686
687 /* Implementation of the userland ioctl api */
688
689 static int
690 agp_info_user(device_t dev, agp_info *info)
691 {
692 struct agp_softc *sc = device_get_softc(dev);
693
694 bzero(info, sizeof *info);
695 info->bridge_id = pci_get_devid(dev);
696 info->agp_mode =
697 pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
698 info->aper_base = rman_get_start(sc->as_aperture);
699 info->aper_size = AGP_GET_APERTURE(dev) >> 20;
700 info->pg_total = info->pg_system = sc->as_maxmem >> AGP_PAGE_SHIFT;
701 info->pg_used = sc->as_allocated >> AGP_PAGE_SHIFT;
702
703 return 0;
704 }
705
706 static int
707 agp_setup_user(device_t dev, agp_setup *setup)
708 {
709 return AGP_ENABLE(dev, setup->agp_mode);
710 }
711
712 static int
713 agp_allocate_user(device_t dev, agp_allocate *alloc)
714 {
715 struct agp_memory *mem;
716
717 mem = AGP_ALLOC_MEMORY(dev,
718 alloc->type,
719 alloc->pg_count << AGP_PAGE_SHIFT);
720 if (mem) {
721 alloc->key = mem->am_id;
722 alloc->physical = mem->am_physical;
723 return 0;
724 } else {
725 return ENOMEM;
726 }
727 }
728
729 static int
730 agp_deallocate_user(device_t dev, int id)
731 {
732 	struct agp_memory *mem = agp_find_memory(dev, id);
733
734 if (mem) {
735 AGP_FREE_MEMORY(dev, mem);
736 return 0;
737 } else {
738 return ENOENT;
739 }
740 }
741
742 static int
743 agp_bind_user(device_t dev, agp_bind *bind)
744 {
745 struct agp_memory *mem = agp_find_memory(dev, bind->key);
746
747 if (!mem)
748 return ENOENT;
749
750 return AGP_BIND_MEMORY(dev, mem, bind->pg_start << AGP_PAGE_SHIFT);
751 }
752
753 static int
754 agp_unbind_user(device_t dev, agp_unbind *unbind)
755 {
756 struct agp_memory *mem = agp_find_memory(dev, unbind->key);
757
758 if (!mem)
759 return ENOENT;
760
761 return AGP_UNBIND_MEMORY(dev, mem);
762 }
763
764 static int
765 agp_open(struct cdev *kdev, int oflags, int devtype, struct thread *td)
766 {
767 device_t dev = KDEV2DEV(kdev);
768 struct agp_softc *sc = device_get_softc(dev);
769
770 if (!sc->as_isopen) {
771 sc->as_isopen = 1;
772 device_busy(dev);
773 }
774
775 return 0;
776 }
777
778 static int
779 agp_close(struct cdev *kdev, int fflag, int devtype, struct thread *td)
780 {
781 device_t dev = KDEV2DEV(kdev);
782 struct agp_softc *sc = device_get_softc(dev);
783 struct agp_memory *mem;
784
785 /*
786 * Clear the GATT and force release on last close
787 */
788 while ((mem = TAILQ_FIRST(&sc->as_memory)) != 0) {
789 if (mem->am_is_bound)
790 AGP_UNBIND_MEMORY(dev, mem);
791 AGP_FREE_MEMORY(dev, mem);
792 }
793 if (sc->as_state == AGP_ACQUIRE_USER)
794 agp_release_helper(dev, AGP_ACQUIRE_USER);
795 sc->as_isopen = 0;
796 device_unbusy(dev);
797
798 return 0;
799 }
800
801 static int
802 agp_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int fflag, struct thread *td)
803 {
804 device_t dev = KDEV2DEV(kdev);
805
806 switch (cmd) {
807 case AGPIOC_INFO:
808 return agp_info_user(dev, (agp_info *) data);
809
810 case AGPIOC_ACQUIRE:
811 return agp_acquire_helper(dev, AGP_ACQUIRE_USER);
812
813 case AGPIOC_RELEASE:
814 return agp_release_helper(dev, AGP_ACQUIRE_USER);
815
816 case AGPIOC_SETUP:
817 return agp_setup_user(dev, (agp_setup *)data);
818
819 case AGPIOC_ALLOCATE:
820 return agp_allocate_user(dev, (agp_allocate *)data);
821
822 case AGPIOC_DEALLOCATE:
823 return agp_deallocate_user(dev, *(int *) data);
824
825 case AGPIOC_BIND:
826 return agp_bind_user(dev, (agp_bind *)data);
827
828 case AGPIOC_UNBIND:
829 return agp_unbind_user(dev, (agp_unbind *)data);
830
831 }
832
833 return EINVAL;
834 }
835
836 static int
837 agp_mmap(struct cdev *kdev, vm_offset_t offset, vm_paddr_t *paddr, int prot)
838 {
839 device_t dev = KDEV2DEV(kdev);
840 struct agp_softc *sc = device_get_softc(dev);
841
842 if (offset > AGP_GET_APERTURE(dev))
843 return -1;
844 *paddr = rman_get_start(sc->as_aperture) + offset;
845 return 0;
846 }
847
848 /* Implementation of the kernel api */
849
850 device_t
851 agp_find_device()
852 {
853 if (!agp_devclass)
854 return 0;
855 return devclass_get_device(agp_devclass, 0);
856 }
857
858 enum agp_acquire_state
859 agp_state(device_t dev)
860 {
861 struct agp_softc *sc = device_get_softc(dev);
862 return sc->as_state;
863 }
864
865 void
866 agp_get_info(device_t dev, struct agp_info *info)
867 {
868 struct agp_softc *sc = device_get_softc(dev);
869
870 info->ai_mode =
871 pci_read_config(dev, agp_find_caps(dev) + AGP_STATUS, 4);
872 info->ai_aperture_base = rman_get_start(sc->as_aperture);
873 info->ai_aperture_size = rman_get_size(sc->as_aperture);
874 info->ai_aperture_va = (vm_offset_t) rman_get_virtual(sc->as_aperture);
875 info->ai_memory_allowed = sc->as_maxmem;
876 info->ai_memory_used = sc->as_allocated;
877 }
878
879 int
880 agp_acquire(device_t dev)
881 {
882 return agp_acquire_helper(dev, AGP_ACQUIRE_KERNEL);
883 }
884
885 int
886 agp_release(device_t dev)
887 {
888 return agp_release_helper(dev, AGP_ACQUIRE_KERNEL);
889 }
890
891 int
892 agp_enable(device_t dev, u_int32_t mode)
893 {
894 return AGP_ENABLE(dev, mode);
895 }
896
897 void *agp_alloc_memory(device_t dev, int type, vm_size_t bytes)
898 {
899 return (void *) AGP_ALLOC_MEMORY(dev, type, bytes);
900 }
901
902 void agp_free_memory(device_t dev, void *handle)
903 {
904 struct agp_memory *mem = (struct agp_memory *) handle;
905 AGP_FREE_MEMORY(dev, mem);
906 }
907
908 int agp_bind_memory(device_t dev, void *handle, vm_offset_t offset)
909 {
910 struct agp_memory *mem = (struct agp_memory *) handle;
911 return AGP_BIND_MEMORY(dev, mem, offset);
912 }
913
914 int agp_unbind_memory(device_t dev, void *handle)
915 {
916 struct agp_memory *mem = (struct agp_memory *) handle;
917 return AGP_UNBIND_MEMORY(dev, mem);
918 }
919
920 void agp_memory_info(device_t dev, void *handle, struct
921 agp_memory_info *mi)
922 {
923 struct agp_memory *mem = (struct agp_memory *) handle;
924
925 mi->ami_size = mem->am_size;
926 mi->ami_physical = mem->am_physical;
927 mi->ami_offset = mem->am_offset;
928 mi->ami_is_bound = mem->am_is_bound;
929 }
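
For illustration only (not part of agp.c): a minimal sketch of how an in-kernel consumer such as a DRM driver might drive the kernel API implemented at the bottom of this file. It assumes the declarations from <pci/agpvar.h>, uses the first attached agp unit, and elides most error recovery; the size and offset values are arbitrary.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <pci/agpvar.h>

static int
example_agp_use(void)
{
	device_t agpdev;
	struct agp_info info;
	struct agp_memory_info minfo;
	void *handle;

	/* agp_find_device() returns unit 0 of the agp devclass, if any. */
	agpdev = agp_find_device();
	if (agpdev == NULL || agp_state(agpdev) != AGP_ACQUIRE_FREE)
		return (ENXIO);

	if (agp_acquire(agpdev) != 0)		/* acquires AGP_ACQUIRE_KERNEL */
		return (EBUSY);

	agp_get_info(agpdev, &info);
	agp_enable(agpdev, info.ai_mode);	/* echo the bridge's reported mode */

	/* One AGP page of type-0 memory, bound at aperture offset 0. */
	handle = agp_alloc_memory(agpdev, 0, AGP_PAGE_SIZE);
	if (handle == NULL) {
		agp_release(agpdev);
		return (ENOMEM);
	}
	if (agp_bind_memory(agpdev, handle, 0) == 0) {
		agp_memory_info(agpdev, handle, &minfo);
		/* ... use the aperture mapping at info.ai_aperture_va ... */
		agp_unbind_memory(agpdev, handle);
	}
	agp_free_memory(agpdev, handle);
	agp_release(agpdev);
	return (0);
}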
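For illustration only (not part of agp.c): a minimal userland sketch of the /dev/agpgart ioctl interface served by agp_ioctl() above (the node is created in agp_generic_attach()). It assumes the structure layouts from <sys/agpio.h> and omits most error handling; an X server is the usual real consumer.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/agpio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	agp_info info;
	agp_setup setup;
	agp_allocate alloc;
	agp_bind bindreq;
	agp_unbind unbindreq;
	int fd, key;

	fd = open("/dev/agpgart", O_RDWR);
	if (fd < 0 || ioctl(fd, AGPIOC_ACQUIRE) != 0)	/* takes AGP_ACQUIRE_USER */
		return (1);

	ioctl(fd, AGPIOC_INFO, &info);
	printf("aperture: %ju MB at %#jx\n",
	    (uintmax_t)info.aper_size, (uintmax_t)info.aper_base);

	setup.agp_mode = info.agp_mode;		/* request whatever the bridge reports */
	ioctl(fd, AGPIOC_SETUP, &setup);

	alloc.type = 0;				/* only type 0 is handled by the generic code */
	alloc.pg_count = 1;			/* one AGP page */
	if (ioctl(fd, AGPIOC_ALLOCATE, &alloc) == 0) {
		bindreq.key = alloc.key;
		bindreq.pg_start = 0;		/* first aperture page */
		ioctl(fd, AGPIOC_BIND, &bindreq);

		unbindreq.key = alloc.key;
		ioctl(fd, AGPIOC_UNBIND, &unbindreq);

		key = alloc.key;
		ioctl(fd, AGPIOC_DEALLOCATE, &key);
	}
	ioctl(fd, AGPIOC_RELEASE);
	close(fd);
	return (0);
}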